FreeBSD/Linux Kernel Cross Reference
sys/net/pf_ioctl.c
1 /* $OpenBSD: pf_ioctl.c,v 1.397 2023/01/06 17:44:34 sashan Exp $ */
2
3 /*
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 *
36 */
37
38 #include "pfsync.h"
39 #include "pflog.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/filio.h>
46 #include <sys/fcntl.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/kernel.h>
50 #include <sys/time.h>
51 #include <sys/timeout.h>
52 #include <sys/pool.h>
53 #include <sys/malloc.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/syslog.h>
57 #include <sys/specdev.h>
58 #include <uvm/uvm_extern.h>
59
60 #include <crypto/md5.h>
61
62 #include <net/if.h>
63 #include <net/if_var.h>
64 #include <net/route.h>
65 #include <net/hfsc.h>
66 #include <net/fq_codel.h>
67
68 #include <netinet/in.h>
69 #include <netinet/ip.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/ip_icmp.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
75
76 #ifdef INET6
77 #include <netinet/ip6.h>
78 #include <netinet/icmp6.h>
79 #endif /* INET6 */
80
81 #include <net/pfvar.h>
82 #include <net/pfvar_priv.h>
83
84 #if NPFSYNC > 0
85 #include <netinet/ip_ipsp.h>
86 #include <net/if_pfsync.h>
87 #endif /* NPFSYNC > 0 */
88
struct pool		 pf_tag_pl;

/* entry points and local helpers defined in this file */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
void			 pf_rollback_rules(u_int32_t, char *);
void			 pf_remove_queues(void);
int			 pf_commit_queues(void);
void			 pf_free_queues(struct pf_queuehead *);
void			 pf_calc_chksum(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
struct pfi_kif		*pf_kif_setup(struct pfi_kif *);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_validate_range(u_int8_t, u_int16_t[2], int);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *);
int			 pf_rule_checkaf(struct pf_rule *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);
int			 pf_states_clr(struct pfioc_state_kill *);
int			 pf_states_get(struct pfioc_states *);

struct pf_rule		 pf_default_rule, pf_default_rule_new;

/*
 * Global options staged by individual DIOCSET* ioctls; pf_trans_set.mask
 * records which fields were staged so pf_trans_set_commit() can apply
 * them together.
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_ORDER_HOST	0
#define	PF_ORDER_NET	1

/* bits of pf_trans_set.mask, one per staged field above */
#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* tag/queue IDs are allocated from 1..TAGID_MAX (0 means "no tag") */
#define	TAGID_MAX	 50000
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/*
 * pf_lock protects consistency of PF data structures, which don't have
 * their dedicated lock yet. The pf_lock currently protects:
 *   - rules,
 *   - radix tables,
 *   - source nodes
 * All callers must grab pf_lock exclusively.
 *
 * pf_state_lock protects consistency of state table. Packets, which do state
 * look up grab the lock as readers. If packet must create state, then it must
 * grab the lock as writer. Whenever packet creates state it grabs pf_lock
 * first then it locks pf_state_lock as the writer.
 */
struct rwlock		 pf_lock = RWLOCK_INITIALIZER("pf_lock");
struct rwlock		 pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
/* serializes ioctl handlers against each other (see pfioctl()) */
struct rwlock		 pfioctl_rw = RWLOCK_INITIALIZER("pfioctl_rw");

/* per-CPU anchor evaluation stack, allocated in pfattach() */
struct cpumem		*pf_anchor_stack;

/* tagname2tag()/tag2tagname() below are shared by tags and queue IDs */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
170
171
/*
 * One-time pf(4) attach: set up memory pools, initialize the pf
 * subsystems, install the default (pass-all) rule and default state
 * timeouts, seed pf_status, and allocate the per-CPU anchor stack.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;
	struct pf_anchor_stackframe *sf;
	struct cpumem_iter cmi;

	/* fixed-size object pools for every pf allocation type */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
	    IPL_SOFTNET, 0, "pfrule", NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
	    IPL_SOFTNET, 0, "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
	    IPL_SOFTNET, 0, "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
	    IPL_SOFTNET, 0, "pfstate", NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
	    IPL_SOFTNET, 0, "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
	    IPL_SOFTNET, 0, "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
	    IPL_SOFTNET, 0, "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
	    IPL_SOFTNET, 0, "pfqueue", NULL);
	pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
	    IPL_SOFTNET, 0, "pftag", NULL);
	pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
	    IPL_SOFTNET, 0, "pfpktdelay", NULL);
	pool_init(&pf_anchor_pl, sizeof(struct pf_anchor), 0,
	    IPL_SOFTNET, 0, "pfanchor", NULL);

	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();
	pf_syncookies_init();

	/* enforce the configured hard limits on states and anchors */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_ANCHORS].pp,
	    pf_pool_limits[PF_LIMIT_ANCHORS].limit, NULL, 0);

	/* use a smaller table-entry limit on machines with <= 100MB RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* double-buffered queue lists: one active, one being built */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	memset(&pf_status, 0, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	pf_default_rule_new = pf_default_rule;

	/*
	 * we waste two stack frames as meta-data.
	 * frame[0] always presents a top, which can not be used for data
	 * frame[PF_ANCHOR_STACK_MAX] denotes a bottom of the stack and keeps
	 * the pointer to currently used stack frame.
	 */
	pf_anchor_stack = cpumem_malloc(
	    sizeof(struct pf_anchor_stackframe) * (PF_ANCHOR_STACK_MAX + 2),
	    M_WAITOK|M_ZERO);
	CPUMEM_FOREACH(sf, &cmi, pf_anchor_stack)
		sf[PF_ANCHOR_STACK_MAX].sf_stack_top = &sf[0];
}
281
282 int
283 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
284 {
285 int unit = minor(dev);
286
287 if (unit & ((1 << CLONE_SHIFT) - 1))
288 return (ENXIO);
289
290 return (0);
291 }
292
/* pf(4) close entry point; no per-open state exists, so nothing to do. */
int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	return (0);
}
298
299 void
300 pf_rule_free(struct pf_rule *rule)
301 {
302 if (rule == NULL)
303 return;
304
305 pfi_kif_free(rule->kif);
306 pfi_kif_free(rule->rcv_kif);
307 pfi_kif_free(rule->rdr.kif);
308 pfi_kif_free(rule->nat.kif);
309 pfi_kif_free(rule->route.kif);
310
311 pool_put(&pf_rule_pl, rule);
312 }
313
/*
 * Unlink rule from rulequeue (when non-NULL) and tear it down once no
 * states or source nodes reference it.  While references remain, only
 * the unlink happens here; the resource release below is skipped.
 * NOTE(review): presumably the final teardown then happens via a later
 * call with rulequeue == NULL — confirm against the callers.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* mark the rule as unlinked (tqe_prev doubles as a flag) */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced or still linked: defer the actual release */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/* tables were not removed above when rulequeue was NULL; do it now */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_remove_anchor(rule);
	pool_put(&pf_rule_pl, rule);
}
366
/*
 * Look up the numeric ID for tagname in head, bumping its reference
 * count on a hit.  With create set, a new entry is allocated when the
 * name is unknown.  Returns 0 on failure (unknown name and !create,
 * ID space exhausted, or pool allocation failure); valid IDs are
 * 1..TAGID_MAX.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/*
	 * new entry: the list is kept sorted by ID, so walk it until the
	 * IDs stop being consecutive — that gap (or the tail) is the
	 * first free ID.  p remains the entry the new one goes before,
	 * or NULL when appending.
	 */
	TAILQ_FOREACH(p, head, entries) {
		if (p->tag != new_tagid)
			break;
		new_tagid = p->tag + 1;
	}

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
413
414 void
415 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
416 {
417 struct pf_tagname *tag;
418
419 TAILQ_FOREACH(tag, head, entries)
420 if (tag->tag == tagid) {
421 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
422 return;
423 }
424 }
425
426 void
427 tag_unref(struct pf_tags *head, u_int16_t tag)
428 {
429 struct pf_tagname *p, *next;
430
431 if (tag == 0)
432 return;
433
434 TAILQ_FOREACH_SAFE(p, head, entries, next) {
435 if (tag == p->tag) {
436 if (--p->ref == 0) {
437 TAILQ_REMOVE(head, p, entries);
438 pool_put(&pf_tag_pl, p);
439 }
440 break;
441 }
442 }
443 }
444
/* Packet-tag namespace wrapper around tagname2tag(). */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
450
/* Packet-tag namespace wrapper around tag2tagname(). */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
456
457 void
458 pf_tag_ref(u_int16_t tag)
459 {
460 struct pf_tagname *t;
461
462 TAILQ_FOREACH(t, &pf_tags, entries)
463 if (t->tag == tag)
464 break;
465 if (t != NULL)
466 t->ref++;
467 }
468
/* Packet-tag namespace wrapper around tag_unref(). */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
474
475 int
476 pf_rtlabel_add(struct pf_addr_wrap *a)
477 {
478 if (a->type == PF_ADDR_RTLABEL &&
479 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
480 return (-1);
481 return (0);
482 }
483
484 void
485 pf_rtlabel_remove(struct pf_addr_wrap *a)
486 {
487 if (a->type == PF_ADDR_RTLABEL)
488 rtlabel_unref(a->v.rtlabel);
489 }
490
491 void
492 pf_rtlabel_copyout(struct pf_addr_wrap *a)
493 {
494 const char *name;
495
496 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
497 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
498 strlcpy(a->v.rtlabelname, "?",
499 sizeof(a->v.rtlabelname));
500 else
501 strlcpy(a->v.rtlabelname, name,
502 sizeof(a->v.rtlabelname));
503 }
504 }
505
/* Queue-ID namespace wrapper around tagname2tag(). */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
511
/* Queue-ID namespace wrapper around tag2tagname(). */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
517
518 void
519 pf_qid_unref(u_int16_t qid)
520 {
521 tag_unref(&pf_qids, (u_int16_t)qid);
522 }
523
524 int
525 pf_begin_rules(u_int32_t *ticket, const char *anchor)
526 {
527 struct pf_ruleset *rs;
528 struct pf_rule *rule;
529
530 if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
531 return (EINVAL);
532 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
533 pf_rm_rule(rs->rules.inactive.ptr, rule);
534 rs->rules.inactive.rcount--;
535 }
536 *ticket = ++rs->rules.inactive.ticket;
537 rs->rules.inactive.open = 1;
538 return (0);
539 }
540
541 void
542 pf_rollback_rules(u_int32_t ticket, char *anchor)
543 {
544 struct pf_ruleset *rs;
545 struct pf_rule *rule;
546
547 rs = pf_find_ruleset(anchor);
548 if (rs == NULL || !rs->rules.inactive.open ||
549 rs->rules.inactive.ticket != ticket)
550 return;
551 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
552 pf_rm_rule(rs->rules.inactive.ptr, rule);
553 rs->rules.inactive.rcount--;
554 }
555 rs->rules.inactive.open = 0;
556
557 /* queue defs only in the main ruleset */
558 if (anchor[0])
559 return;
560
561 pf_free_queues(pf_queues_inactive);
562 }
563
564 void
565 pf_free_queues(struct pf_queuehead *where)
566 {
567 struct pf_queuespec *q, *qtmp;
568
569 TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
570 TAILQ_REMOVE(where, q, entries);
571 pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
572 pool_put(&pf_queue_pl, q);
573 }
574 }
575
576 void
577 pf_remove_queues(void)
578 {
579 struct pf_queuespec *q;
580 struct ifnet *ifp;
581
582 /* put back interfaces in normal queueing mode */
583 TAILQ_FOREACH(q, pf_queues_active, entries) {
584 if (q->parent_qid != 0)
585 continue;
586
587 ifp = q->kif->pfik_ifp;
588 if (ifp == NULL)
589 continue;
590
591 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
592 }
593 }
594
/*
 * Transient per-root-interface record used while pf_create_queues()
 * builds a new queue configuration: ties the interface to the ifq/pfq
 * operations and discipline state that will be attached on commit.
 * Records are chained through 'next' into a simple singly-linked list.
 */
struct pf_queue_if {
	struct ifnet		*ifp;
	const struct ifq_ops	*ifqops;
	const struct pfq_ops	*pfqops;
	void			*disc;
	struct pf_queue_if	*next;
};
602
603 static inline struct pf_queue_if *
604 pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
605 {
606 struct pf_queue_if *qif = list;
607
608 while (qif != NULL) {
609 if (qif->ifp == ifp)
610 return (qif);
611
612 qif = qif->next;
613 }
614
615 return (qif);
616 }
617
/*
 * Build and attach the queue configuration described by
 * pf_queues_active.  Runs in phases: allocate discipline state for
 * each root queue's interface, add every queue to its interface's
 * discipline, reset interfaces that lost their root queue, then attach
 * the new disciplines.  On any pfq_addqueue() failure all allocated
 * state is freed and the error is returned; nothing is attached.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_queue_if	*list = NULL, *qif;
	int			 error;

	/*
	 * Find root queues and allocate traffic conditioner
	 * private data for these interfaces
	 */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
		qif->ifp = ifp;

		/* root class selects HFSC; otherwise plain fq_codel */
		if (q->flags & PFQS_ROOTCLASS) {
			qif->ifqops = ifq_hfsc_ops;
			qif->pfqops = pfq_hfsc_ops;
		} else {
			qif->ifqops = ifq_fqcodel_ops;
			qif->pfqops = pfq_fqcodel_ops;
		}

		qif->disc = qif->pfqops->pfq_alloc(ifp);

		qif->next = list;
		list = qif;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* every queue's interface must have a root record by now */
		qif = pf_ifp2q(list, ifp);
		KASSERT(qif != NULL);

		error = qif->pfqops->pfq_addqueue(qif->disc, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		/* still configured in the new set: leave it alone */
		qif = pf_ifp2q(list, ifp);
		if (qif != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		qif = list;
		list = qif->next;

		ifp = qif->ifp;

		/* ifq_attach takes over qif->disc; only the record is freed */
		ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (0);

 error:
	while (list != NULL) {
		qif = list;
		list = qif->next;

		qif->pfqops->pfq_free(qif->disc);
		free(qif, M_TEMP, sizeof(*qif));
	}

	return (error);
}
709
710 int
711 pf_commit_queues(void)
712 {
713 struct pf_queuehead *qswap;
714 int error;
715
716 /* swap */
717 qswap = pf_queues_active;
718 pf_queues_active = pf_queues_inactive;
719 pf_queues_inactive = qswap;
720
721 error = pf_create_queues();
722 if (error != 0) {
723 pf_queues_inactive = pf_queues_active;
724 pf_queues_active = qswap;
725 return (error);
726 }
727
728 pf_free_queues(pf_queues_inactive);
729
730 return (0);
731 }
732
733 const struct pfq_ops *
734 pf_queue_manager(struct pf_queuespec *q)
735 {
736 if (q->flags & PFQS_FLOWQUEUE)
737 return pfq_fqcodel_ops;
738 return (/* pfq_default_ops */ NULL);
739 }
740
/*
 * Helpers for folding rule fields into the ruleset MD5 checksum.  An
 * MD5_CTX named 'ctx' must be in scope.  The HTONL/HTONS variants
 * convert to network byte order first so the checksum matches across
 * hosts of different endianness; 'stor' is caller-provided scratch.
 */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
756
/*
 * Fold the checksum-relevant fields of one rule address into ctx.
 * Only the union member selected by addr.type is hashed; optimizer-
 * generated table names (PF_OPTIMIZER_TABLE_PFX) are skipped so that
 * automatic table optimization does not perturb the ruleset checksum.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		/* hash the name only when it is not an optimizer table */
		if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
		    strlen(PF_OPTIMIZER_TABLE_PFX)))
			PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
786
/*
 * Fold one rule into the ruleset checksum.  The set and order of the
 * fields hashed here define the checksum format; changing either
 * changes pf_status.pf_chksum for identical rulesets.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	 x;	/* scratch for PF_MD5_UPD_HTONS */
	u_int32_t	 y;	/* scratch for PF_MD5_UPD_HTONL */

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
826
/*
 * Commit a rules transaction: swap the inactive ruleset in as active,
 * recompute skip steps (and, for the main ruleset, the checksum and
 * queue configuration), then purge the previously active rules.
 * Returns EBUSY when the ticket does not match the open transaction.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;
	u_int32_t		 old_rcount;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* checksum covers the main ruleset only */
	if (rs == &pf_main_ruleset)
		pf_calc_chksum(rs);

	/* Swap rules, keep the old. */
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
868
869 void
870 pf_calc_chksum(struct pf_ruleset *rs)
871 {
872 MD5_CTX ctx;
873 struct pf_rule *rule;
874 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
875
876 MD5Init(&ctx);
877
878 if (rs->rules.inactive.rcount) {
879 TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
880 pf_hash_rule(&ctx, rule);
881 }
882 }
883
884 MD5Final(digest, &ctx);
885 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
886 }
887
888 int
889 pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
890 sa_family_t af)
891 {
892 if (pfi_dynaddr_setup(addr, af, PR_WAITOK) ||
893 pf_tbladdr_setup(ruleset, addr, PR_WAITOK) ||
894 pf_rtlabel_add(addr))
895 return (EINVAL);
896
897 return (0);
898 }
899
/*
 * Turn a caller-supplied kif buffer into a referenced, canonical kif
 * for that interface name.  pfi_kif_get() may consume kif_buf as the
 * new entry; when it does not (the name already existed), kif_buf is
 * returned non-NULL here and must be freed.  NULL in, NULL out.
 */
struct pfi_kif *
pf_kif_setup(struct pfi_kif *kif_buf)
{
	struct pfi_kif *kif;

	if (kif_buf == NULL)
		return (NULL);

	KASSERT(kif_buf->pfik_name[0] != '\0');

	kif = pfi_kif_get(kif_buf->pfik_name, &kif_buf);
	if (kif_buf != NULL)
		pfi_kif_free(kif_buf);
	pfi_kif_ref(kif, PFI_KIF_REF_RULE);

	return (kif);
}
917
/*
 * Prepare an address wrapper for copyout to userland: each helper
 * fills in the user-visible representation for its own address type
 * (dynamic interface, table, route label) and ignores other types.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
925
/*
 * DIOCCLRSTATES backend: remove all states, optionally restricted to
 * the interface named in psk->psk_ifname (empty name == all), and
 * report the number removed via psk->psk_killed.  Returns 0, or the
 * error from the interruptible list-lock acquisition.
 *
 * Lock order here: NET_LOCK -> pf_state_list rwlock (read, keeps the
 * gc from unlinking entries) -> PF_LOCK -> state write lock.
 */
int
pf_states_clr(struct pfioc_state_kill *psk)
{
	struct pf_state		*st, *nextst;
	struct pf_state		*head, *tail;
	u_int			 killed = 0;
	int			 error;

	NET_LOCK();

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		goto unlock;

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	st = NULL;
	nextst = head;

	PF_LOCK();
	PF_STATE_ENTER_WRITE();

	while (st != tail) {
		st = nextst;
		nextst = TAILQ_NEXT(st, entry_list);

		/* already unlinked by another path; skip */
		if (st->timeout == PFTM_UNLINKED)
			continue;

		if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
		    st->kif->pfik_name)) {
#if NPFSYNC > 0
			/* don't send out individual delete messages */
			SET(st->state_flags, PFSTATE_NOSYNC);
#endif	/* NPFSYNC > 0 */
			pf_remove_state(st);
			killed++;
		}
	}

	PF_STATE_EXIT_WRITE();
#if NPFSYNC > 0
	/* one bulk clear message instead of per-state deletes */
	pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif	/* NPFSYNC > 0 */
	PF_UNLOCK();
	rw_exit(&pf_state_list.pfs_rwl);

	psk->psk_killed = killed;
unlock:
	NET_UNLOCK();

	return (error);
}
984
/*
 * DIOCGETSTATES backend: export states into the user buffer at
 * ps->ps_states, updating ps->ps_len to the number of bytes written.
 * A zero ps_len on entry is a size query: only the space needed for
 * the current state count is reported.  Export stops silently when
 * the buffer fills.  Returns 0 or a copyout/lock error.
 */
int
pf_states_get(struct pfioc_states *ps)
{
	struct pf_state		*st, *nextst;
	struct pf_state		*head, *tail;
	struct pfsync_state	*p, pstore;
	u_int32_t		 nr = 0;
	int			 error;

	if (ps->ps_len == 0) {
		nr = pf_status.states;
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		return (0);
	}

	p = ps->ps_states;

	/* lock against the gc removing an item from the list */
	error = rw_enter(&pf_state_list.pfs_rwl, RW_READ|RW_INTR);
	if (error != 0)
		return (error);

	/* get a snapshot view of the ends of the list to traverse between */
	mtx_enter(&pf_state_list.pfs_mtx);
	head = TAILQ_FIRST(&pf_state_list.pfs_list);
	tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
	mtx_leave(&pf_state_list.pfs_mtx);

	st = NULL;
	nextst = head;

	while (st != tail) {
		st = nextst;
		nextst = TAILQ_NEXT(st, entry_list);

		/* skip states already unlinked by another path */
		if (st->timeout == PFTM_UNLINKED)
			continue;

		/* stop once the next record would overflow the buffer */
		if ((nr+1) * sizeof(*p) > ps->ps_len)
			break;

		/* export via a kernel-side staging buffer, then copy out */
		pf_state_export(&pstore, st);
		error = copyout(&pstore, p, sizeof(*p));
		if (error)
			goto fail;

		p++;
		nr++;
	}
	ps->ps_len = sizeof(struct pfsync_state) * nr;

fail:
	rw_exit(&pf_state_list.pfs_rwl);

	return (error);
}
1041
1042 int
1043 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1044 {
1045 int error = 0;
1046
1047 /* XXX keep in sync with switch() below */
1048 if (securelevel > 1)
1049 switch (cmd) {
1050 case DIOCGETRULES:
1051 case DIOCGETRULE:
1052 case DIOCGETSTATE:
1053 case DIOCSETSTATUSIF:
1054 case DIOCGETSTATUS:
1055 case DIOCCLRSTATUS:
1056 case DIOCNATLOOK:
1057 case DIOCSETDEBUG:
1058 case DIOCGETSTATES:
1059 case DIOCGETTIMEOUT:
1060 case DIOCGETLIMIT:
1061 case DIOCGETRULESETS:
1062 case DIOCGETRULESET:
1063 case DIOCGETQUEUES:
1064 case DIOCGETQUEUE:
1065 case DIOCGETQSTATS:
1066 case DIOCRGETTABLES:
1067 case DIOCRGETTSTATS:
1068 case DIOCRCLRTSTATS:
1069 case DIOCRCLRADDRS:
1070 case DIOCRADDADDRS:
1071 case DIOCRDELADDRS:
1072 case DIOCRSETADDRS:
1073 case DIOCRGETADDRS:
1074 case DIOCRGETASTATS:
1075 case DIOCRCLRASTATS:
1076 case DIOCRTSTADDRS:
1077 case DIOCOSFPGET:
1078 case DIOCGETSRCNODES:
1079 case DIOCCLRSRCNODES:
1080 case DIOCIGETIFACES:
1081 case DIOCSETIFFLAG:
1082 case DIOCCLRIFFLAG:
1083 case DIOCGETSYNFLWATS:
1084 break;
1085 case DIOCRCLRTABLES:
1086 case DIOCRADDTABLES:
1087 case DIOCRDELTABLES:
1088 case DIOCRSETTFLAGS:
1089 if (((struct pfioc_table *)addr)->pfrio_flags &
1090 PFR_FLAG_DUMMY)
1091 break; /* dummy operation ok */
1092 return (EPERM);
1093 default:
1094 return (EPERM);
1095 }
1096
1097 if (!(flags & FWRITE))
1098 switch (cmd) {
1099 case DIOCGETRULES:
1100 case DIOCGETSTATE:
1101 case DIOCGETSTATUS:
1102 case DIOCGETSTATES:
1103 case DIOCGETTIMEOUT:
1104 case DIOCGETLIMIT:
1105 case DIOCGETRULESETS:
1106 case DIOCGETRULESET:
1107 case DIOCGETQUEUES:
1108 case DIOCGETQUEUE:
1109 case DIOCGETQSTATS:
1110 case DIOCNATLOOK:
1111 case DIOCRGETTABLES:
1112 case DIOCRGETTSTATS:
1113 case DIOCRGETADDRS:
1114 case DIOCRGETASTATS:
1115 case DIOCRTSTADDRS:
1116 case DIOCOSFPGET:
1117 case DIOCGETSRCNODES:
1118 case DIOCIGETIFACES:
1119 case DIOCGETSYNFLWATS:
1120 break;
1121 case DIOCRCLRTABLES:
1122 case DIOCRADDTABLES:
1123 case DIOCRDELTABLES:
1124 case DIOCRCLRTSTATS:
1125 case DIOCRCLRADDRS:
1126 case DIOCRADDADDRS:
1127 case DIOCRDELADDRS:
1128 case DIOCRSETADDRS:
1129 case DIOCRSETTFLAGS:
1130 if (((struct pfioc_table *)addr)->pfrio_flags &
1131 PFR_FLAG_DUMMY) {
1132 flags |= FWRITE; /* need write lock for dummy */
1133 break; /* dummy operation ok */
1134 }
1135 return (EACCES);
1136 case DIOCGETRULE:
1137 if (((struct pfioc_rule *)addr)->action ==
1138 PF_GET_CLR_CNTR)
1139 return (EACCES);
1140 break;
1141 default:
1142 return (EACCES);
1143 }
1144
1145 if (flags & FWRITE)
1146 rw_enter_write(&pfioctl_rw);
1147 else
1148 rw_enter_read(&pfioctl_rw);
1149
1150	switch (cmd) {
1151
	/*
	 * DIOCSTART: enable packet filtering.  Fails with EEXIST if pf
	 * is already running.
	 */
1152	case DIOCSTART:
1153		NET_LOCK();
1154		PF_LOCK();
1155		if (pf_status.running)
1156			error = EEXIST;
1157		else {
1158			pf_status.running = 1;
1159			pf_status.since = getuptime();
			/*
			 * Seed the state-id generator once: wall-clock time
			 * in the upper 32 bits, so ids are unique across
			 * reboots.
			 */
1160			if (pf_status.stateid == 0) {
1161				pf_status.stateid = gettime();
1162				pf_status.stateid = pf_status.stateid << 32;
1163			}
			/* kick off the periodic purge of expired states */
1164			timeout_add_sec(&pf_purge_to, 1);
1165			pf_create_queues();
1166			DPFPRINTF(LOG_NOTICE, "pf: started");
1167		}
1168		PF_UNLOCK();
1169		NET_UNLOCK();
1170		break;
1171
	/*
	 * DIOCSTOP: disable packet filtering.  Fails with ENOENT if pf
	 * is not running.  Queues are torn down; existing states are
	 * left to the purge timeout.
	 */
1172	case DIOCSTOP:
1173		NET_LOCK();
1174		PF_LOCK();
1175		if (!pf_status.running)
1176			error = ENOENT;
1177		else {
1178			pf_status.running = 0;
1179			pf_status.since = getuptime();
1180			pf_remove_queues();
1181			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1182		}
1183		PF_UNLOCK();
1184		NET_UNLOCK();
1185		break;
1186
	/*
	 * DIOCGETQUEUES: return the number of active queue definitions
	 * plus the active ruleset ticket, so userland can iterate with
	 * DIOCGETQUEUE and detect concurrent ruleset changes.
	 */
1187	case DIOCGETQUEUES: {
1188		struct pfioc_queue *pq = (struct pfioc_queue *)addr;
1189		struct pf_queuespec *qs;
1190		u_int32_t nr = 0;
1191
1192		NET_LOCK();
1193		PF_LOCK();
1194		pq->ticket = pf_main_ruleset.rules.active.ticket;
1195
1196		/* save state to not run over them all each time? */
1197		qs = TAILQ_FIRST(pf_queues_active);
1198		while (qs != NULL) {
1199			qs = TAILQ_NEXT(qs, entries);
1200			nr++;
1201		}
1202		pq->nr = nr;
1203		PF_UNLOCK();
1204		NET_UNLOCK();
1205		break;
1206	}
1207
	/*
	 * DIOCGETQUEUE: copy out the pq->nr'th active queue spec.  EBUSY
	 * if the ticket is stale (ruleset changed since DIOCGETQUEUES)
	 * or the index walked past the end of the list.
	 */
1208	case DIOCGETQUEUE: {
1209		struct pfioc_queue *pq = (struct pfioc_queue *)addr;
1210		struct pf_queuespec *qs;
1211		u_int32_t nr = 0;
1212
1213		NET_LOCK();
1214		PF_LOCK();
1215		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1216			error = EBUSY;
1217			PF_UNLOCK();
1218			NET_UNLOCK();
1219			goto fail;
1220		}
1221
1222		/* save state to not run over them all each time? */
1223		qs = TAILQ_FIRST(pf_queues_active);
1224		while ((qs != NULL) && (nr++ < pq->nr))
1225			qs = TAILQ_NEXT(qs, entries);
1226		if (qs == NULL) {
1227			error = EBUSY;
1228			PF_UNLOCK();
1229			NET_UNLOCK();
1230			goto fail;
1231		}
1232		memcpy(&pq->queue, qs, sizeof(pq->queue));
1233		PF_UNLOCK();
1234		NET_UNLOCK();
1235		break;
1236	}
1237
	/*
	 * DIOCGETQSTATS: copy out the pq->nr'th queue spec together with
	 * its scheduler statistics (at most pq->nbytes of them).  The
	 * ticket must match the active ruleset, as with DIOCGETQUEUE.
	 */
1238	case DIOCGETQSTATS: {
1239		struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
1240		struct pf_queuespec *qs;
1241		u_int32_t nr;
1242		int nbytes;
1243
1244		NET_LOCK();
1245		PF_LOCK();
1246		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1247			error = EBUSY;
1248			PF_UNLOCK();
1249			NET_UNLOCK();
1250			goto fail;
1251		}
1252		nbytes = pq->nbytes;
1253		nr = 0;
1254
1255		/* save state to not run over them all each time? */
1256		qs = TAILQ_FIRST(pf_queues_active);
1257		while ((qs != NULL) && (nr++ < pq->nr))
1258			qs = TAILQ_NEXT(qs, entries);
1259		if (qs == NULL) {
1260			error = EBUSY;
1261			PF_UNLOCK();
1262			NET_UNLOCK();
1263			goto fail;
1264		}
1265		memcpy(&pq->queue, qs, sizeof(pq->queue));
1266		/* It's a root flow queue but is not an HFSC root class */
1267		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
1268		    !(qs->flags & PFQS_ROOTCLASS))
1269			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
1270			    &nbytes);
1271		else
1272			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
1273			    &nbytes);
		/* report how many bytes of stats were actually written */
1274		if (error == 0)
1275			pq->nbytes = nbytes;
1276		PF_UNLOCK();
1277		NET_UNLOCK();
1278		break;
1279	}
1280
	/*
	 * DIOCADDQUEUE: append a queue spec to the inactive (in-progress
	 * transaction) queue list.  The pool allocation is done before
	 * taking the locks since pool_get may sleep; every error path
	 * after that must return the spec to the pool.
	 */
1281	case DIOCADDQUEUE: {
1282		struct pfioc_queue *q = (struct pfioc_queue *)addr;
1283		struct pf_queuespec *qs;
1284
1285		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1286		if (qs == NULL) {
1287			error = ENOMEM;
1288			goto fail;
1289		}
1290
1291		NET_LOCK();
1292		PF_LOCK();
		/* ticket must match the open inactive transaction */
1293		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
1294			error = EBUSY;
1295			PF_UNLOCK();
1296			NET_UNLOCK();
1297			pool_put(&pf_queue_pl, qs);
1298			goto fail;
1299		}
1300		memcpy(qs, &q->queue, sizeof(*qs));
		/* allocate (or look up) a qid for this queue name */
1301		qs->qid = pf_qname2qid(qs->qname, 1);
1302		if (qs->qid == 0) {
1303			error = EBUSY;
1304			PF_UNLOCK();
1305			NET_UNLOCK();
1306			pool_put(&pf_queue_pl, qs);
1307			goto fail;
1308		}
		/* a named parent queue must already exist */
1309		if (qs->parent[0] && (qs->parent_qid =
1310		    pf_qname2qid(qs->parent, 0)) == 0) {
1311			error = ESRCH;
1312			PF_UNLOCK();
1313			NET_UNLOCK();
1314			pool_put(&pf_queue_pl, qs);
1315			goto fail;
1316		}
1317		qs->kif = pfi_kif_get(qs->ifname, NULL);
1318		if (qs->kif == NULL) {
1319			error = ESRCH;
1320			PF_UNLOCK();
1321			NET_UNLOCK();
1322			pool_put(&pf_queue_pl, qs);
1323			goto fail;
1324		}
1325		/* XXX resolve bw percentage specs */
1326		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
1327
1328		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
1329		PF_UNLOCK();
1330		NET_UNLOCK();
1331
1332		break;
1333	}
1334
	/*
	 * DIOCADDRULE: append a rule to the inactive ruleset named by
	 * pr->anchor.  Validation that needs no locks (copyin, icmp
	 * type range, address family, address types, route-to needing a
	 * direction) is done up front; once the locks are held, failures
	 * must free the half-built rule with pf_rm_rule/pf_rule_free.
	 */
1335	case DIOCADDRULE: {
1336		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1337		struct pf_ruleset *ruleset;
1338		struct pf_rule *rule, *tail;
1339
1340		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1341		if (rule == NULL) {
1342			error = ENOMEM;
1343			goto fail;
1344		}
1345
1346		if ((error = pf_rule_copyin(&pr->rule, rule))) {
1347			pf_rule_free(rule);
1348			rule = NULL;
1349			goto fail;
1350		}
		/* return_icmp holds type in the high byte, code in the low */
1351		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1352			error = EINVAL;
1353			pf_rule_free(rule);
1354			rule = NULL;
1355			goto fail;
1356		}
1357		if ((error = pf_rule_checkaf(rule))) {
1358			pf_rule_free(rule);
1359			rule = NULL;
1360			goto fail;
1361		}
1362		if (rule->src.addr.type == PF_ADDR_NONE ||
1363		    rule->dst.addr.type == PF_ADDR_NONE) {
1364			error = EINVAL;
1365			pf_rule_free(rule);
1366			rule = NULL;
1367			goto fail;
1368		}
1369
		/* route-to et al. make no sense without a direction */
1370		if (rule->rt && !rule->direction) {
1371			error = EINVAL;
1372			pf_rule_free(rule);
1373			rule = NULL;
1374			goto fail;
1375		}
1376
1377		NET_LOCK();
1378		PF_LOCK();
1379		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1380		ruleset = pf_find_ruleset(pr->anchor);
1381		if (ruleset == NULL) {
1382			error = EINVAL;
1383			PF_UNLOCK();
1384			NET_UNLOCK();
1385			pf_rule_free(rule);
1386			goto fail;
1387		}
1388		if (pr->ticket != ruleset->rules.inactive.ticket) {
1389			error = EBUSY;
1390			PF_UNLOCK();
1391			NET_UNLOCK();
1392			pf_rule_free(rule);
1393			goto fail;
1394		}
		/* record who created this rule, for pfctl -vvsr output */
1395		rule->cuid = p->p_ucred->cr_ruid;
1396		rule->cpid = p->p_p->ps_pid;
1397
		/* new rule is numbered one past the current tail */
1398		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
1399		    pf_rulequeue);
1400		if (tail)
1401			rule->nr = tail->nr + 1;
1402		else
1403			rule->nr = 0;
1404
		/* resolve interface names to referenced kif structures */
1405		rule->kif = pf_kif_setup(rule->kif);
1406		rule->rcv_kif = pf_kif_setup(rule->rcv_kif);
1407		rule->rdr.kif = pf_kif_setup(rule->rdr.kif);
1408		rule->nat.kif = pf_kif_setup(rule->nat.kif);
1409		rule->route.kif = pf_kif_setup(rule->route.kif);
1410
1411		if (rule->overload_tblname[0]) {
1412			if ((rule->overload_tbl = pfr_attach_table(ruleset,
1413			    rule->overload_tblname, PR_WAITOK)) == NULL)
1414				error = EINVAL;
1415			else
1416				rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
1417		}
1418
		/*
		 * Set up dynamic addresses / tables for every address slot;
		 * errors accumulate and are checked once below.
		 */
1419		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1420			error = EINVAL;
1421		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1422			error = EINVAL;
1423		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
1424			error = EINVAL;
1425		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
1426			error = EINVAL;
1427		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
1428			error = EINVAL;
1429		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1430			error = EINVAL;
1431
1432		if (error) {
1433			pf_rm_rule(NULL, rule);
1434			PF_UNLOCK();
1435			NET_UNLOCK();
1436			goto fail;
1437		}
1438		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
1439		    rule, entries);
1440		ruleset->rules.inactive.rcount++;
1441		PF_UNLOCK();
1442		NET_UNLOCK();
1443		break;
1444	}
1446
	/*
	 * DIOCGETRULES: return the rule count and active ticket for the
	 * anchor's ruleset, priming userland iteration via DIOCGETRULE.
	 */
1447	case DIOCGETRULES: {
1448		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1449		struct pf_ruleset *ruleset;
1450		struct pf_rule *tail;
1451
1452		NET_LOCK();
1453		PF_LOCK();
1454		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1455		ruleset = pf_find_ruleset(pr->anchor);
1456		if (ruleset == NULL) {
1457			error = EINVAL;
1458			PF_UNLOCK();
1459			NET_UNLOCK();
1460			goto fail;
1461		}
		/* count = last rule number + 1 (rules are numbered from 0) */
1462		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
1463		if (tail)
1464			pr->nr = tail->nr + 1;
1465		else
1466			pr->nr = 0;
1467		pr->ticket = ruleset->rules.active.ticket;
1468		PF_UNLOCK();
1469		NET_UNLOCK();
1470		break;
1471	}
1472
	/*
	 * DIOCGETRULE: copy out the pr->nr'th rule of the anchor's active
	 * ruleset.  Kernel pointers inside the copied rule are zeroed so
	 * no kernel addresses leak to userland; skip-step pointers are
	 * translated to rule numbers.  With action PF_GET_CLR_CNTR the
	 * rule's counters are also reset (which is why the read-only
	 * permission switch above rejects that action).
	 */
1473	case DIOCGETRULE: {
1474		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1475		struct pf_ruleset *ruleset;
1476		struct pf_rule *rule;
1477		int i;
1478
1479		NET_LOCK();
1480		PF_LOCK();
1481		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1482		ruleset = pf_find_ruleset(pr->anchor);
1483		if (ruleset == NULL) {
1484			error = EINVAL;
1485			PF_UNLOCK();
1486			NET_UNLOCK();
1487			goto fail;
1488		}
1489		if (pr->ticket != ruleset->rules.active.ticket) {
1490			error = EBUSY;
1491			PF_UNLOCK();
1492			NET_UNLOCK();
1493			goto fail;
1494		}
1495		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
1496		while ((rule != NULL) && (rule->nr != pr->nr))
1497			rule = TAILQ_NEXT(rule, entries);
1498		if (rule == NULL) {
1499			error = EBUSY;
1500			PF_UNLOCK();
1501			NET_UNLOCK();
1502			goto fail;
1503		}
1504		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
		/* scrub all kernel pointers from the userland copy */
1505		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
1506		pr->rule.kif = NULL;
1507		pr->rule.nat.kif = NULL;
1508		pr->rule.rdr.kif = NULL;
1509		pr->rule.route.kif = NULL;
1510		pr->rule.rcv_kif = NULL;
1511		pr->rule.anchor = NULL;
1512		pr->rule.overload_tbl = NULL;
		/* convert internal fixed-point rate back to user units */
1513		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
1514		if (pf_anchor_copyout(ruleset, rule, pr)) {
1515			error = EBUSY;
1516			PF_UNLOCK();
1517			NET_UNLOCK();
1518			goto fail;
1519		}
1520		pf_addr_copyout(&pr->rule.src.addr);
1521		pf_addr_copyout(&pr->rule.dst.addr);
1522		pf_addr_copyout(&pr->rule.rdr.addr);
1523		pf_addr_copyout(&pr->rule.nat.addr);
1524		pf_addr_copyout(&pr->rule.route.addr);
		/* export skip steps as rule numbers, -1 meaning "none" */
1525		for (i = 0; i < PF_SKIP_COUNT; ++i)
1526			if (rule->skip[i].ptr == NULL)
1527				pr->rule.skip[i].nr = (u_int32_t)-1;
1528			else
1529				pr->rule.skip[i].nr =
1530				    rule->skip[i].ptr->nr;
1531
1532		if (pr->action == PF_GET_CLR_CNTR) {
1533			rule->evaluations = 0;
1534			rule->packets[0] = rule->packets[1] = 0;
1535			rule->bytes[0] = rule->bytes[1] = 0;
1536			rule->states_tot = 0;
1537		}
1538		PF_UNLOCK();
1539		NET_UNLOCK();
1540		break;
1541	}
1542
	/*
	 * DIOCCHANGERULE: modify the active ruleset in place -- add a
	 * rule at head/tail/before/after an existing rule, remove one,
	 * or just hand out a change ticket (PF_CHANGE_GET_TICKET).
	 * After the change all rules are renumbered, the ticket is
	 * bumped and skip steps are recomputed.
	 */
1543	case DIOCCHANGERULE: {
1544		struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1545		struct pf_ruleset *ruleset;
1546		struct pf_rule *oldrule = NULL, *newrule = NULL;
1547		u_int32_t nr = 0;
1548
1549		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1550		    pcr->action > PF_CHANGE_GET_TICKET) {
1551			error = EINVAL;
1552			goto fail;
1553		}
1554
1555		if (pcr->action == PF_CHANGE_GET_TICKET) {
1556			NET_LOCK();
1557			PF_LOCK();
1558
1559			ruleset = pf_find_ruleset(pcr->anchor);
1560			if (ruleset == NULL)
1561				error = EINVAL;
1562			else
1563				pcr->ticket = ++ruleset->rules.active.ticket;
1564
1565			PF_UNLOCK();
1566			NET_UNLOCK();
			/* ticket-only request: done (fail label is the
			 * common exit, not necessarily an error) */
1567			goto fail;
1568		}
1569
		/* all actions except REMOVE carry a new rule to insert */
1570		if (pcr->action != PF_CHANGE_REMOVE) {
1571			newrule = pool_get(&pf_rule_pl,
1572			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1573			if (newrule == NULL) {
1574				error = ENOMEM;
1575				goto fail;
1576			}
1577
			/* same unlocked validation as DIOCADDRULE */
1578			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1579				error = EINVAL;
1580				pool_put(&pf_rule_pl, newrule);
1581				goto fail;
1582			}
1583			error = pf_rule_copyin(&pcr->rule, newrule);
1584			if (error != 0) {
1585				pf_rule_free(newrule);
1586				newrule = NULL;
1587				goto fail;
1588			}
1589			if ((error = pf_rule_checkaf(newrule))) {
1590				pf_rule_free(newrule);
1591				newrule = NULL;
1592				goto fail;
1593			}
1594			if (newrule->rt && !newrule->direction) {
1595				pf_rule_free(newrule);
1596				error = EINVAL;
1597				newrule = NULL;
1598				goto fail;
1599			}
1600		}
1601
1602		NET_LOCK();
1603		PF_LOCK();
1604		ruleset = pf_find_ruleset(pcr->anchor);
1605		if (ruleset == NULL) {
1606			error = EINVAL;
1607			PF_UNLOCK();
1608			NET_UNLOCK();
1609			pf_rule_free(newrule);
1610			goto fail;
1611		}
1612
1613		if (pcr->ticket != ruleset->rules.active.ticket) {
1614			error = EINVAL;
1615			PF_UNLOCK();
1616			NET_UNLOCK();
1617			pf_rule_free(newrule);
1618			goto fail;
1619		}
1620
1621		if (pcr->action != PF_CHANGE_REMOVE) {
1622			KASSERT(newrule != NULL);
1623			newrule->cuid = p->p_ucred->cr_ruid;
1624			newrule->cpid = p->p_p->ps_pid;
1625
			/* resolve interface names, as in DIOCADDRULE */
1626			newrule->kif = pf_kif_setup(newrule->kif);
1627			newrule->rcv_kif = pf_kif_setup(newrule->rcv_kif);
1628			newrule->rdr.kif = pf_kif_setup(newrule->rdr.kif);
1629			newrule->nat.kif = pf_kif_setup(newrule->nat.kif);
1630			newrule->route.kif = pf_kif_setup(newrule->route.kif);
1631
1632			if (newrule->overload_tblname[0]) {
1633				newrule->overload_tbl = pfr_attach_table(
1634				    ruleset, newrule->overload_tblname,
1635				    PR_WAITOK);
1636				if (newrule->overload_tbl == NULL)
1637					error = EINVAL;
1638				else
1639					newrule->overload_tbl->pfrkt_flags |=
1640					    PFR_TFLAG_ACTIVE;
1641			}
1642
			/* set up address slots; errors checked once below */
1643			if (pf_addr_setup(ruleset, &newrule->src.addr,
1644			    newrule->af))
1645				error = EINVAL;
1646			if (pf_addr_setup(ruleset, &newrule->dst.addr,
1647			    newrule->af))
1648				error = EINVAL;
1649			if (pf_addr_setup(ruleset, &newrule->rdr.addr,
1650			    newrule->af))
1651				error = EINVAL;
1652			if (pf_addr_setup(ruleset, &newrule->nat.addr,
1653			    newrule->af))
1654				error = EINVAL;
1655			if (pf_addr_setup(ruleset, &newrule->route.addr,
1656			    newrule->af))
1657				error = EINVAL;
1658			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1659				error = EINVAL;
1660
1661			if (error) {
1662				pf_rm_rule(NULL, newrule);
1663				PF_UNLOCK();
1664				NET_UNLOCK();
1665				goto fail;
1666			}
1667		}
1668
		/* locate the anchor rule for the requested position */
1669		if (pcr->action == PF_CHANGE_ADD_HEAD)
1670			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1671		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1672			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1673			    pf_rulequeue);
1674		else {
1675			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1676			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1677				oldrule = TAILQ_NEXT(oldrule, entries);
1678			if (oldrule == NULL) {
1679				if (newrule != NULL)
1680					pf_rm_rule(NULL, newrule);
1681				error = EINVAL;
1682				PF_UNLOCK();
1683				NET_UNLOCK();
1684				goto fail;
1685			}
1686		}
1687
1688		if (pcr->action == PF_CHANGE_REMOVE) {
1689			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1690			ruleset->rules.active.rcount--;
1691		} else {
1692			if (oldrule == NULL)
1693				TAILQ_INSERT_TAIL(
1694				    ruleset->rules.active.ptr,
1695				    newrule, entries);
1696			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1697			    pcr->action == PF_CHANGE_ADD_BEFORE)
1698				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1699			else
1700				TAILQ_INSERT_AFTER(
1701				    ruleset->rules.active.ptr,
1702				    oldrule, newrule, entries);
1703			ruleset->rules.active.rcount++;
1704		}
1705
		/* renumber sequentially after insertion/removal */
1706		nr = 0;
1707		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1708			oldrule->nr = nr++;
1709
1710		ruleset->rules.active.ticket++;
1711
1712		pf_calc_skip_steps(ruleset->rules.active.ptr);
1713		pf_remove_if_empty_ruleset(ruleset);
1714
1715		PF_UNLOCK();
1716		NET_UNLOCK();
1717		break;
1718	}
1719
	/* DIOCCLRSTATES: flush states matching the kill spec (helper). */
1720	case DIOCCLRSTATES:
1721		error = pf_states_clr((struct pfioc_state_kill *)addr);
1722		break;
1723
	/*
	 * DIOCKILLSTATES: remove states matching the kill spec.  Three
	 * strategies, fastest first:
	 *   1. exact state id + creator id     -> direct RB lookup;
	 *   2. af + proto + exact ports        -> state-key tree lookup,
	 *      tried in both directions;
	 *   3. anything else                   -> full walk of tree_id
	 *      comparing every state against the spec.
	 */
1724	case DIOCKILLSTATES: {
1725		struct pf_state *st, *nextst;
1726		struct pf_state_item *si, *sit;
1727		struct pf_state_key *sk, key;
1728		struct pf_addr *srcaddr, *dstaddr;
1729		u_int16_t srcport, dstport;
1730		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1731		u_int i, killed = 0;
1732		const int dirs[] = { PF_IN, PF_OUT };
1733		int sidx, didx;
1734
		/* strategy 1: kill one state by its unique id */
1735		if (psk->psk_pfcmp.id) {
1736			if (psk->psk_pfcmp.creatorid == 0)
1737				psk->psk_pfcmp.creatorid = pf_status.hostid;
1738			NET_LOCK();
1739			PF_LOCK();
1740			PF_STATE_ENTER_WRITE();
1741			if ((st = pf_find_state_byid(&psk->psk_pfcmp))) {
1742				pf_remove_state(st);
1743				psk->psk_killed = 1;
1744			}
1745			PF_STATE_EXIT_WRITE();
1746			PF_UNLOCK();
1747			NET_UNLOCK();
1748			goto fail;
1749		}
1750
		/* strategy 2: fully-specified key, use the key tree */
1751		if (psk->psk_af && psk->psk_proto &&
1752		    psk->psk_src.port_op == PF_OP_EQ &&
1753		    psk->psk_dst.port_op == PF_OP_EQ) {
1754
1755			key.af = psk->psk_af;
1756			key.proto = psk->psk_proto;
1757			key.rdomain = psk->psk_rdomain;
1758
1759			NET_LOCK();
1760			PF_LOCK();
1761			PF_STATE_ENTER_WRITE();
			/* try the key as both wire-side and stack-side */
1762			for (i = 0; i < nitems(dirs); i++) {
1763				if (dirs[i] == PF_IN) {
1764					sidx = 0;
1765					didx = 1;
1766				} else {
1767					sidx = 1;
1768					didx = 0;
1769				}
1770				pf_addrcpy(&key.addr[sidx],
1771				    &psk->psk_src.addr.v.a.addr, key.af);
1772				pf_addrcpy(&key.addr[didx],
1773				    &psk->psk_dst.addr.v.a.addr, key.af);
1774				key.port[sidx] = psk->psk_src.port[0];
1775				key.port[didx] = psk->psk_dst.port[0];
1776
1777				sk = RBT_FIND(pf_state_tree, &pf_statetbl,
1778				    &key);
1779				if (sk == NULL)
1780					continue;
1781
				/*
				 * Kill each state attached to the key,
				 * respecting the direction the key matched
				 * in (af-translated states match both) and
				 * an optional interface filter.
				 */
1782				TAILQ_FOREACH_SAFE(si, &sk->sk_states,
1783				    si_entry, sit) {
1784					struct pf_state *sist = si->si_st;
1785					if (((sist->key[PF_SK_WIRE]->af ==
1786					    sist->key[PF_SK_STACK]->af &&
1787					    sk == (dirs[i] == PF_IN ?
1788					    sist->key[PF_SK_WIRE] :
1789					    sist->key[PF_SK_STACK])) ||
1790					    (sist->key[PF_SK_WIRE]->af !=
1791					    sist->key[PF_SK_STACK]->af &&
1792					    dirs[i] == PF_IN &&
1793					    (sk == sist->key[PF_SK_STACK] ||
1794					    sk == sist->key[PF_SK_WIRE]))) &&
1795					    (!psk->psk_ifname[0] ||
1796					    (sist->kif != pfi_all &&
1797					    !strcmp(psk->psk_ifname,
1798					    sist->kif->pfik_name)))) {
1799						pf_remove_state(sist);
1800						killed++;
1801					}
1802				}
1803			}
1804			if (killed)
1805				psk->psk_killed = killed;
1806			PF_STATE_EXIT_WRITE();
1807			PF_UNLOCK();
1808			NET_UNLOCK();
1809			goto fail;
1810		}
1811
		/* strategy 3: walk all states and match field by field */
1812		NET_LOCK();
1813		PF_LOCK();
1814		PF_STATE_ENTER_WRITE();
1815		RBT_FOREACH_SAFE(st, pf_state_tree_id, &tree_id, nextst) {
			/*
			 * Pick the key on the side the user's src/dst
			 * refer to: stack side for outbound states, wire
			 * side for inbound.
			 */
1816			if (st->direction == PF_OUT) {
1817				sk = st->key[PF_SK_STACK];
1818				srcaddr = &sk->addr[1];
1819				dstaddr = &sk->addr[0];
1820				srcport = sk->port[1];
1821				dstport = sk->port[0];
1822			} else {
1823				sk = st->key[PF_SK_WIRE];
1824				srcaddr = &sk->addr[0];
1825				dstaddr = &sk->addr[1];
1826				srcport = sk->port[0];
1827				dstport = sk->port[1];
1828			}
1829			if ((!psk->psk_af || sk->af == psk->psk_af)
1830			    && (!psk->psk_proto || psk->psk_proto ==
1831			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
1832			    pf_match_addr(psk->psk_src.neg,
1833			    &psk->psk_src.addr.v.a.addr,
1834			    &psk->psk_src.addr.v.a.mask,
1835			    srcaddr, sk->af) &&
1836			    pf_match_addr(psk->psk_dst.neg,
1837			    &psk->psk_dst.addr.v.a.addr,
1838			    &psk->psk_dst.addr.v.a.mask,
1839			    dstaddr, sk->af) &&
1840			    (psk->psk_src.port_op == 0 ||
1841			    pf_match_port(psk->psk_src.port_op,
1842			    psk->psk_src.port[0], psk->psk_src.port[1],
1843			    srcport)) &&
1844			    (psk->psk_dst.port_op == 0 ||
1845			    pf_match_port(psk->psk_dst.port_op,
1846			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1847			    dstport)) &&
1848			    (!psk->psk_label[0] || (st->rule.ptr->label[0] &&
1849			    !strcmp(psk->psk_label, st->rule.ptr->label))) &&
1850			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1851			    st->kif->pfik_name))) {
1852				pf_remove_state(st);
1853				killed++;
1854			}
1855		}
1856		psk->psk_killed = killed;
1857		PF_STATE_EXIT_WRITE();
1858		PF_UNLOCK();
1859		NET_UNLOCK();
1860		break;
1861	}
1862
1863	#if NPFSYNC > 0
	/*
	 * DIOCADDSTATE: inject a state from userland (pfsync support).
	 * Only the timeout index is validated here; the rest is left to
	 * pf_state_import.
	 */
1864	case DIOCADDSTATE: {
1865		struct pfioc_state *ps = (struct pfioc_state *)addr;
1866		struct pfsync_state *sp = &ps->state;
1867
1868		if (sp->timeout >= PFTM_MAX) {
1869			error = EINVAL;
1870			goto fail;
1871		}
1872		NET_LOCK();
1873		PF_LOCK();
1874		error = pf_state_import(sp, PFSYNC_SI_IOCTL);
1875		PF_UNLOCK();
1876		NET_UNLOCK();
1877		break;
1878	}
1879	#endif /* NPFSYNC > 0 */
1880
	/*
	 * DIOCGETSTATE: look up one state by (id, creatorid) and export
	 * it.  A reference is taken under the state read lock so the
	 * export can run safely after the locks are dropped.
	 */
1881	case DIOCGETSTATE: {
1882		struct pfioc_state *ps = (struct pfioc_state *)addr;
1883		struct pf_state *st;
1884		struct pf_state_cmp id_key;
1885
1886		memset(&id_key, 0, sizeof(id_key));
1887		id_key.id = ps->state.id;
1888		id_key.creatorid = ps->state.creatorid;
1889
1890		NET_LOCK();
1891		PF_STATE_ENTER_READ();
1892		st = pf_find_state_byid(&id_key);
		/* pf_state_ref(NULL) is safe; NULL check follows unlock */
1893		st = pf_state_ref(st);
1894		PF_STATE_EXIT_READ();
1895		NET_UNLOCK();
1896		if (st == NULL) {
1897			error = ENOENT;
1898			goto fail;
1899		}
1900
1901		pf_state_export(&ps->state, st);
1902		pf_state_unref(st);
1903		break;
1904	}
1905
	/* DIOCGETSTATES: bulk state dump (helper does the copyout). */
1906	case DIOCGETSTATES:
1907		error = pf_states_get((struct pfioc_states *)addr);
1908		break;
1909
	/*
	 * DIOCGETSTATUS: snapshot pf_status plus per-interface counters
	 * for the status interface.
	 */
1910	case DIOCGETSTATUS: {
1911		struct pf_status *s = (struct pf_status *)addr;
1912		NET_LOCK();
1913		PF_LOCK();
1914		memcpy(s, &pf_status, sizeof(struct pf_status));
1915		pfi_update_status(s->ifname, s);
1916		PF_UNLOCK();
1917		NET_UNLOCK();
1918		break;
1919	}
1920
	/*
	 * DIOCSETSTATUSIF: select the status interface.  An empty name
	 * clears it immediately; a non-empty name is staged in
	 * pf_trans_set and applied when the transaction commits.
	 */
1921	case DIOCSETSTATUSIF: {
1922		struct pfioc_iface *pi = (struct pfioc_iface *)addr;
1923
1924		NET_LOCK();
1925		PF_LOCK();
1926		if (pi->pfiio_name[0] == 0) {
1927			memset(pf_status.ifname, 0, IFNAMSIZ);
1928			PF_UNLOCK();
1929			NET_UNLOCK();
1930			goto fail;
1931		}
1932		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
1933		pf_trans_set.mask |= PF_TSET_STATUSIF;
1934		PF_UNLOCK();
1935		NET_UNLOCK();
1936		break;
1937	}
1938
	/*
	 * DIOCCLRSTATUS: zero the global counters, or only one
	 * interface's counters if a name is given.
	 */
1939	case DIOCCLRSTATUS: {
1940		struct pfioc_iface *pi = (struct pfioc_iface *)addr;
1941
1942		NET_LOCK();
1943		PF_LOCK();
1944		/* if ifname is specified, clear counters there only */
1945		if (pi->pfiio_name[0]) {
1946			pfi_update_status(pi->pfiio_name, NULL);
1947			PF_UNLOCK();
1948			NET_UNLOCK();
1949			goto fail;
1950		}
1951
1952		memset(pf_status.counters, 0, sizeof(pf_status.counters));
1953		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
1954		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
1955		pf_status.since = getuptime();
1956
1957		PF_UNLOCK();
1958		NET_UNLOCK();
1959		break;
1960	}
1961
	/*
	 * DIOCNATLOOK: given a connection 4-tuple, find the matching
	 * state and report the translated (NAT) addresses/ports and
	 * routing domain.  E2BIG if the tuple is ambiguous (more than
	 * one state), ENOENT if no state matches.
	 */
1962	case DIOCNATLOOK: {
1963		struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1964		struct pf_state_key *sk;
1965		struct pf_state *st;
1966		struct pf_state_key_cmp key;
1967		int m = 0, direction = pnl->direction;
1968		int sidx, didx;
1969
1970		switch (pnl->af) {
1971		case AF_INET:
1972			break;
1973	#ifdef INET6
1974		case AF_INET6:
1975			break;
1976	#endif /* INET6 */
1977		default:
1978			error = EAFNOSUPPORT;
1979			goto fail;
1980		}
1981
1982		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
1983		sidx = (direction == PF_IN) ? 1 : 0;
1984		didx = (direction == PF_IN) ? 0 : 1;
1985
		/* a full tuple is required (ports only for TCP/UDP) */
1986		if (!pnl->proto ||
1987		    PF_AZERO(&pnl->saddr, pnl->af) ||
1988		    PF_AZERO(&pnl->daddr, pnl->af) ||
1989		    ((pnl->proto == IPPROTO_TCP ||
1990		    pnl->proto == IPPROTO_UDP) &&
1991		    (!pnl->dport || !pnl->sport)) ||
1992		    pnl->rdomain > RT_TABLEID_MAX)
1993			error = EINVAL;
1994		else {
1995			key.af = pnl->af;
1996			key.proto = pnl->proto;
1997			key.rdomain = pnl->rdomain;
1998			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
1999			key.port[sidx] = pnl->sport;
2000			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2001			key.port[didx] = pnl->dport;
2002
2003			NET_LOCK();
2004			PF_STATE_ENTER_READ();
2005			st = pf_find_state_all(&key, direction, &m);
2006			st = pf_state_ref(st);
2007			PF_STATE_EXIT_READ();
2008			NET_UNLOCK();
2009
2010			if (m > 1)
2011				error = E2BIG;	/* more than one state */
2012			else if (st != NULL) {
				/* report the other side's key (post-NAT) */
2013				sk = st->key[sidx];
2014				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
2015				    sk->af);
2016				pnl->rsport = sk->port[sidx];
2017				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
2018				    sk->af);
2019				pnl->rdport = sk->port[didx];
2020				pnl->rrdomain = sk->rdomain;
2021			} else
2022				error = ENOENT;
2023			pf_state_unref(st);
2024		}
2025		break;
2026	}
2027
	/*
	 * DIOCSETTIMEOUT: stage a new value for one timeout class in
	 * pf_default_rule_new (applied at transaction commit); the
	 * currently-active value is returned in pt->seconds.
	 */
2028	case DIOCSETTIMEOUT: {
2029		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2030
2031		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2032		    pt->seconds < 0) {
2033			error = EINVAL;
2034			goto fail;
2035		}
2036		NET_LOCK();
2037		PF_LOCK();
		/* the purge interval must never be zero */
2038		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2039			pt->seconds = 1;
2040		pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
2041		pt->seconds = pf_default_rule.timeout[pt->timeout];
2042		PF_UNLOCK();
2043		NET_UNLOCK();
2044		break;
2045	}
2046
	/* DIOCGETTIMEOUT: read the active value of one timeout class. */
2047	case DIOCGETTIMEOUT: {
2048		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2049
2050		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2051			error = EINVAL;
2052			goto fail;
2053		}
2054		NET_LOCK();
2055		PF_LOCK();
2056		pt->seconds = pf_default_rule.timeout[pt->timeout];
2057		PF_UNLOCK();
2058		NET_UNLOCK();
2059		break;
2060	}
2061
	/* DIOCGETLIMIT: read the active value of one pool limit. */
2062	case DIOCGETLIMIT: {
2063		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2064
2065		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2066			error = EINVAL;
2067			goto fail;
2068		}
2069		NET_LOCK();
2070		PF_LOCK();
2071		pl->limit = pf_pool_limits[pl->index].limit;
2072		PF_UNLOCK();
2073		NET_UNLOCK();
2074		break;
2075	}
2076
	/*
	 * DIOCSETLIMIT: stage a new pool hard limit (applied at commit).
	 * Rejected if more items are already outstanding than the new
	 * limit allows, or if the fragment limit would exceed the mbuf
	 * cluster pool.  Returns the active limit in pl->limit.
	 */
2077	case DIOCSETLIMIT: {
2078		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2079
2080		NET_LOCK();
2081		PF_LOCK();
2082		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2083		    pf_pool_limits[pl->index].pp == NULL) {
2084			error = EINVAL;
2085			PF_UNLOCK();
2086			NET_UNLOCK();
2087			goto fail;
2088		}
2089		if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
2090		    pl->limit) {
2091			error = EBUSY;
2092			PF_UNLOCK();
2093			NET_UNLOCK();
2094			goto fail;
2095		}
2096		/* Fragments reference mbuf clusters. */
2097		if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
2098			error = EINVAL;
2099			PF_UNLOCK();
2100			NET_UNLOCK();
2101			goto fail;
2102		}
2103
2104		pf_pool_limits[pl->index].limit_new = pl->limit;
2105		pl->limit = pf_pool_limits[pl->index].limit;
2106		PF_UNLOCK();
2107		NET_UNLOCK();
2108		break;
2109	}
2110
	/* DIOCSETDEBUG: stage a new debug level (applied at commit). */
2111	case DIOCSETDEBUG: {
2112		u_int32_t *level = (u_int32_t *)addr;
2113
2114		NET_LOCK();
2115		PF_LOCK();
2116		pf_trans_set.debug = *level;
2117		pf_trans_set.mask |= PF_TSET_DEBUG;
2118		PF_UNLOCK();
2119		NET_UNLOCK();
2120		break;
2121	}
2122
	/*
	 * DIOCGETRULESETS: count the child anchors directly under the
	 * path in pr->path.  The main ruleset has no anchor structure of
	 * its own, hence the special-cased walk over all root anchors.
	 */
2123	case DIOCGETRULESETS: {
2124		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2125		struct pf_ruleset *ruleset;
2126		struct pf_anchor *anchor;
2127
2128		NET_LOCK();
2129		PF_LOCK();
2130		pr->path[sizeof(pr->path) - 1] = '\0';
2131		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2132			error = EINVAL;
2133			PF_UNLOCK();
2134			NET_UNLOCK();
2135			goto fail;
2136		}
2137		pr->nr = 0;
2138		if (ruleset == &pf_main_ruleset) {
2139			/* XXX kludge for pf_main_ruleset */
2140			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2141				if (anchor->parent == NULL)
2142					pr->nr++;
2143		} else {
2144			RB_FOREACH(anchor, pf_anchor_node,
2145			    &ruleset->anchor->children)
2146				pr->nr++;
2147		}
2148		PF_UNLOCK();
2149		NET_UNLOCK();
2150		break;
2151	}
2152
	/*
	 * DIOCGETRULESET: return the name of the pr->nr'th child anchor
	 * under pr->path.  EBUSY if the index is out of range (name left
	 * empty after the walk).
	 */
2153	case DIOCGETRULESET: {
2154		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2155		struct pf_ruleset *ruleset;
2156		struct pf_anchor *anchor;
2157		u_int32_t nr = 0;
2158
2159		NET_LOCK();
2160		PF_LOCK();
2161		pr->path[sizeof(pr->path) - 1] = '\0';
2162		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2163			error = EINVAL;
2164			PF_UNLOCK();
2165			NET_UNLOCK();
2166			goto fail;
2167		}
2168		pr->name[0] = '\0';
2169		if (ruleset == &pf_main_ruleset) {
2170			/* XXX kludge for pf_main_ruleset */
2171			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2172				if (anchor->parent == NULL && nr++ == pr->nr) {
2173					strlcpy(pr->name, anchor->name,
2174					    sizeof(pr->name));
2175					break;
2176				}
2177		} else {
2178			RB_FOREACH(anchor, pf_anchor_node,
2179			    &ruleset->anchor->children)
2180				if (nr++ == pr->nr) {
2181					strlcpy(pr->name, anchor->name,
2182					    sizeof(pr->name));
2183					break;
2184				}
2185		}
2186		PF_UNLOCK();
2187		NET_UNLOCK();
2188		if (!pr->name[0])
2189			error = EBUSY;
2190		break;
2191	}
2192
	/*
	 * Radix-table ioctls (DIOCR*).  Each first validates pfrio_esize
	 * against the element type it expects (0 when no element buffer
	 * is used), then delegates to the pfr_* backend with
	 * PFR_FLAG_USERIOCTL added to mark the caller as userland.
	 */
	/* DIOCRCLRTABLES: delete all tables (in the given anchor). */
2193	case DIOCRCLRTABLES: {
2194		struct pfioc_table *io = (struct pfioc_table *)addr;
2195
2196		if (io->pfrio_esize != 0) {
2197			error = ENODEV;
2198			goto fail;
2199		}
2200		NET_LOCK();
2201		PF_LOCK();
2202		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2203		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2204		PF_UNLOCK();
2205		NET_UNLOCK();
2206		break;
2207	}
2208
	/*
	 * DIOCRADDTABLES: create tables.  NOTE(review): runs without
	 * NET_LOCK/PF_LOCK unlike its siblings -- presumably because
	 * pfr_add_tables may sleep allocating and locks internally;
	 * confirm against pf_table.c.
	 */
2209	case DIOCRADDTABLES: {
2210		struct pfioc_table *io = (struct pfioc_table *)addr;
2211
2212		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2213			error = ENODEV;
2214			goto fail;
2215		}
2216		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2217		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2218		break;
2219	}
2220
	/* DIOCRDELTABLES: delete the listed tables. */
2221	case DIOCRDELTABLES: {
2222		struct pfioc_table *io = (struct pfioc_table *)addr;
2223
2224		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2225			error = ENODEV;
2226			goto fail;
2227		}
2228		NET_LOCK();
2229		PF_LOCK();
2230		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2231		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2232		PF_UNLOCK();
2233		NET_UNLOCK();
2234		break;
2235	}
2236
	/* DIOCRGETTABLES: list tables into the user buffer. */
2237	case DIOCRGETTABLES: {
2238		struct pfioc_table *io = (struct pfioc_table *)addr;
2239
2240		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2241			error = ENODEV;
2242			goto fail;
2243		}
2244		NET_LOCK();
2245		PF_LOCK();
2246		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2247		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2248		PF_UNLOCK();
2249		NET_UNLOCK();
2250		break;
2251	}
2252
	/* DIOCRGETTSTATS: list tables with statistics. */
2253	case DIOCRGETTSTATS: {
2254		struct pfioc_table *io = (struct pfioc_table *)addr;
2255
2256		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2257			error = ENODEV;
2258			goto fail;
2259		}
2260		NET_LOCK();
2261		PF_LOCK();
2262		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2263		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2264		PF_UNLOCK();
2265		NET_UNLOCK();
2266		break;
2267	}
2268
	/* DIOCRCLRTSTATS: zero statistics of the listed tables. */
2269	case DIOCRCLRTSTATS: {
2270		struct pfioc_table *io = (struct pfioc_table *)addr;
2271
2272		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2273			error = ENODEV;
2274			goto fail;
2275		}
2276		NET_LOCK();
2277		PF_LOCK();
2278		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2279		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2280		PF_UNLOCK();
2281		NET_UNLOCK();
2282		break;
2283	}
2284
	/* DIOCRSETTFLAGS: set/clear flags on the listed tables. */
2285	case DIOCRSETTFLAGS: {
2286		struct pfioc_table *io = (struct pfioc_table *)addr;
2287
2288		if (io->pfrio_esize != sizeof(struct pfr_table)) {
2289			error = ENODEV;
2290			goto fail;
2291		}
2292		NET_LOCK();
2293		PF_LOCK();
2294		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2295		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2296		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2297		PF_UNLOCK();
2298		NET_UNLOCK();
2299		break;
2300	}
2301
	/*
	 * Table-address ioctls: operate on the addresses inside one
	 * table.  Same pattern as the table ops above: esize check,
	 * then delegate to pfr_* with PFR_FLAG_USERIOCTL.
	 */
	/* DIOCRCLRADDRS: remove all addresses from a table. */
2302	case DIOCRCLRADDRS: {
2303		struct pfioc_table *io = (struct pfioc_table *)addr;
2304
2305		if (io->pfrio_esize != 0) {
2306			error = ENODEV;
2307			goto fail;
2308		}
2309		NET_LOCK();
2310		PF_LOCK();
2311		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2312		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
2313		PF_UNLOCK();
2314		NET_UNLOCK();
2315		break;
2316	}
2317
	/*
	 * DIOCRADDADDRS: add addresses to a table.  NOTE(review): like
	 * DIOCRADDTABLES this runs without NET_LOCK/PF_LOCK -- confirm
	 * pfr_add_addrs locks internally.
	 */
2318	case DIOCRADDADDRS: {
2319		struct pfioc_table *io = (struct pfioc_table *)addr;
2320
2321		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2322			error = ENODEV;
2323			goto fail;
2324		}
2325		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2326		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2327		    PFR_FLAG_USERIOCTL);
2328		break;
2329	}
2330
	/* DIOCRDELADDRS: delete the listed addresses from a table. */
2331	case DIOCRDELADDRS: {
2332		struct pfioc_table *io = (struct pfioc_table *)addr;
2333
2334		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2335			error = ENODEV;
2336			goto fail;
2337		}
2338		NET_LOCK();
2339		PF_LOCK();
2340		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2341		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2342		    PFR_FLAG_USERIOCTL);
2343		PF_UNLOCK();
2344		NET_UNLOCK();
2345		break;
2346	}
2347
	/* DIOCRSETADDRS: replace a table's contents with the list. */
2348	case DIOCRSETADDRS: {
2349		struct pfioc_table *io = (struct pfioc_table *)addr;
2350
2351		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2352			error = ENODEV;
2353			goto fail;
2354		}
2355		NET_LOCK();
2356		PF_LOCK();
2357		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2358		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2359		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2360		    PFR_FLAG_USERIOCTL, 0);
2361		PF_UNLOCK();
2362		NET_UNLOCK();
2363		break;
2364	}
2365
	/* DIOCRGETADDRS: copy a table's addresses out to userland. */
2366	case DIOCRGETADDRS: {
2367		struct pfioc_table *io = (struct pfioc_table *)addr;
2368
2369		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2370			error = ENODEV;
2371			goto fail;
2372		}
2373		NET_LOCK();
2374		PF_LOCK();
2375		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2376		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2377		PF_UNLOCK();
2378		NET_UNLOCK();
2379		break;
2380	}
2381
	/* DIOCRGETASTATS: copy out addresses with statistics. */
2382	case DIOCRGETASTATS: {
2383		struct pfioc_table *io = (struct pfioc_table *)addr;
2384
2385		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2386			error = ENODEV;
2387			goto fail;
2388		}
2389		NET_LOCK();
2390		PF_LOCK();
2391		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2392		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2393		PF_UNLOCK();
2394		NET_UNLOCK();
2395		break;
2396	}
2397
	/* DIOCRCLRASTATS: zero statistics of the listed addresses. */
2398	case DIOCRCLRASTATS: {
2399		struct pfioc_table *io = (struct pfioc_table *)addr;
2400
2401		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2402			error = ENODEV;
2403			goto fail;
2404		}
2405		NET_LOCK();
2406		PF_LOCK();
2407		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2408		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2409		    PFR_FLAG_USERIOCTL);
2410		PF_UNLOCK();
2411		NET_UNLOCK();
2412		break;
2413	}
2414
	/* DIOCRTSTADDRS: test which of the addresses match the table. */
2415	case DIOCRTSTADDRS: {
2416		struct pfioc_table *io = (struct pfioc_table *)addr;
2417
2418		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2419			error = ENODEV;
2420			goto fail;
2421		}
2422		NET_LOCK();
2423		PF_LOCK();
2424		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2425		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2426		    PFR_FLAG_USERIOCTL);
2427		PF_UNLOCK();
2428		NET_UNLOCK();
2429		break;
2430	}
2431
2432 case DIOCRINADEFINE: {
2433 struct pfioc_table *io = (struct pfioc_table *)addr;
2434
2435 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2436 error = ENODEV;
2437 goto fail;
2438 }
2439 NET_LOCK();
2440 PF_LOCK();
2441 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2442 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2443 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2444 PF_UNLOCK();
2445 NET_UNLOCK();
2446 break;
2447 }
2448
2449 case DIOCOSFPADD: {
2450 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2451 error = pf_osfp_add(io);
2452 break;
2453 }
2454
2455 case DIOCOSFPGET: {
2456 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2457 error = pf_osfp_get(io);
2458 break;
2459 }
2460
2461 case DIOCXBEGIN: {
2462 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2463 struct pfioc_trans_e *ioe;
2464 struct pfr_table *table;
2465 int i;
2466
2467 if (io->esize != sizeof(*ioe)) {
2468 error = ENODEV;
2469 goto fail;
2470 }
2471 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2472 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2473 NET_LOCK();
2474 PF_LOCK();
2475 pf_default_rule_new = pf_default_rule;
2476 PF_UNLOCK();
2477 NET_UNLOCK();
2478 memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2479 for (i = 0; i < io->size; i++) {
2480 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2481 free(table, M_TEMP, sizeof(*table));
2482 free(ioe, M_TEMP, sizeof(*ioe));
2483 error = EFAULT;
2484 goto fail;
2485 }
2486 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2487 sizeof(ioe->anchor)) {
2488 free(table, M_TEMP, sizeof(*table));
2489 free(ioe, M_TEMP, sizeof(*ioe));
2490 error = ENAMETOOLONG;
2491 goto fail;
2492 }
2493 NET_LOCK();
2494 PF_LOCK();
2495 switch (ioe->type) {
2496 case PF_TRANS_TABLE:
2497 memset(table, 0, sizeof(*table));
2498 strlcpy(table->pfrt_anchor, ioe->anchor,
2499 sizeof(table->pfrt_anchor));
2500 if ((error = pfr_ina_begin(table,
2501 &ioe->ticket, NULL, 0))) {
2502 PF_UNLOCK();
2503 NET_UNLOCK();
2504 free(table, M_TEMP, sizeof(*table));
2505 free(ioe, M_TEMP, sizeof(*ioe));
2506 goto fail;
2507 }
2508 break;
2509 case PF_TRANS_RULESET:
2510 if ((error = pf_begin_rules(&ioe->ticket,
2511 ioe->anchor))) {
2512 PF_UNLOCK();
2513 NET_UNLOCK();
2514 free(table, M_TEMP, sizeof(*table));
2515 free(ioe, M_TEMP, sizeof(*ioe));
2516 goto fail;
2517 }
2518 break;
2519 default:
2520 PF_UNLOCK();
2521 NET_UNLOCK();
2522 free(table, M_TEMP, sizeof(*table));
2523 free(ioe, M_TEMP, sizeof(*ioe));
2524 error = EINVAL;
2525 goto fail;
2526 }
2527 PF_UNLOCK();
2528 NET_UNLOCK();
2529 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2530 free(table, M_TEMP, sizeof(*table));
2531 free(ioe, M_TEMP, sizeof(*ioe));
2532 error = EFAULT;
2533 goto fail;
2534 }
2535 }
2536 free(table, M_TEMP, sizeof(*table));
2537 free(ioe, M_TEMP, sizeof(*ioe));
2538 break;
2539 }
2540
2541 case DIOCXROLLBACK: {
2542 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2543 struct pfioc_trans_e *ioe;
2544 struct pfr_table *table;
2545 int i;
2546
2547 if (io->esize != sizeof(*ioe)) {
2548 error = ENODEV;
2549 goto fail;
2550 }
2551 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2552 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2553 for (i = 0; i < io->size; i++) {
2554 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2555 free(table, M_TEMP, sizeof(*table));
2556 free(ioe, M_TEMP, sizeof(*ioe));
2557 error = EFAULT;
2558 goto fail;
2559 }
2560 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2561 sizeof(ioe->anchor)) {
2562 free(table, M_TEMP, sizeof(*table));
2563 free(ioe, M_TEMP, sizeof(*ioe));
2564 error = ENAMETOOLONG;
2565 goto fail;
2566 }
2567 NET_LOCK();
2568 PF_LOCK();
2569 switch (ioe->type) {
2570 case PF_TRANS_TABLE:
2571 memset(table, 0, sizeof(*table));
2572 strlcpy(table->pfrt_anchor, ioe->anchor,
2573 sizeof(table->pfrt_anchor));
2574 if ((error = pfr_ina_rollback(table,
2575 ioe->ticket, NULL, 0))) {
2576 PF_UNLOCK();
2577 NET_UNLOCK();
2578 free(table, M_TEMP, sizeof(*table));
2579 free(ioe, M_TEMP, sizeof(*ioe));
2580 goto fail; /* really bad */
2581 }
2582 break;
2583 case PF_TRANS_RULESET:
2584 pf_rollback_rules(ioe->ticket, ioe->anchor);
2585 break;
2586 default:
2587 PF_UNLOCK();
2588 NET_UNLOCK();
2589 free(table, M_TEMP, sizeof(*table));
2590 free(ioe, M_TEMP, sizeof(*ioe));
2591 error = EINVAL;
2592 goto fail; /* really bad */
2593 }
2594 PF_UNLOCK();
2595 NET_UNLOCK();
2596 }
2597 free(table, M_TEMP, sizeof(*table));
2598 free(ioe, M_TEMP, sizeof(*ioe));
2599 break;
2600 }
2601
2602 case DIOCXCOMMIT: {
2603 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2604 struct pfioc_trans_e *ioe;
2605 struct pfr_table *table;
2606 struct pf_ruleset *rs;
2607 int i;
2608
2609 if (io->esize != sizeof(*ioe)) {
2610 error = ENODEV;
2611 goto fail;
2612 }
2613 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2614 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2615 /* first makes sure everything will succeed */
2616 for (i = 0; i < io->size; i++) {
2617 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2618 free(table, M_TEMP, sizeof(*table));
2619 free(ioe, M_TEMP, sizeof(*ioe));
2620 error = EFAULT;
2621 goto fail;
2622 }
2623 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2624 sizeof(ioe->anchor)) {
2625 free(table, M_TEMP, sizeof(*table));
2626 free(ioe, M_TEMP, sizeof(*ioe));
2627 error = ENAMETOOLONG;
2628 goto fail;
2629 }
2630 NET_LOCK();
2631 PF_LOCK();
2632 switch (ioe->type) {
2633 case PF_TRANS_TABLE:
2634 rs = pf_find_ruleset(ioe->anchor);
2635 if (rs == NULL || !rs->topen || ioe->ticket !=
2636 rs->tticket) {
2637 PF_UNLOCK();
2638 NET_UNLOCK();
2639 free(table, M_TEMP, sizeof(*table));
2640 free(ioe, M_TEMP, sizeof(*ioe));
2641 error = EBUSY;
2642 goto fail;
2643 }
2644 break;
2645 case PF_TRANS_RULESET:
2646 rs = pf_find_ruleset(ioe->anchor);
2647 if (rs == NULL ||
2648 !rs->rules.inactive.open ||
2649 rs->rules.inactive.ticket !=
2650 ioe->ticket) {
2651 PF_UNLOCK();
2652 NET_UNLOCK();
2653 free(table, M_TEMP, sizeof(*table));
2654 free(ioe, M_TEMP, sizeof(*ioe));
2655 error = EBUSY;
2656 goto fail;
2657 }
2658 break;
2659 default:
2660 PF_UNLOCK();
2661 NET_UNLOCK();
2662 free(table, M_TEMP, sizeof(*table));
2663 free(ioe, M_TEMP, sizeof(*ioe));
2664 error = EINVAL;
2665 goto fail;
2666 }
2667 PF_UNLOCK();
2668 NET_UNLOCK();
2669 }
2670 NET_LOCK();
2671 PF_LOCK();
2672
2673 /*
2674 * Checked already in DIOCSETLIMIT, but check again as the
2675 * situation might have changed.
2676 */
2677 for (i = 0; i < PF_LIMIT_MAX; i++) {
2678 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2679 pf_pool_limits[i].limit_new) {
2680 PF_UNLOCK();
2681 NET_UNLOCK();
2682 free(table, M_TEMP, sizeof(*table));
2683 free(ioe, M_TEMP, sizeof(*ioe));
2684 error = EBUSY;
2685 goto fail;
2686 }
2687 }
2688 /* now do the commit - no errors should happen here */
2689 for (i = 0; i < io->size; i++) {
2690 PF_UNLOCK();
2691 NET_UNLOCK();
2692 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2693 free(table, M_TEMP, sizeof(*table));
2694 free(ioe, M_TEMP, sizeof(*ioe));
2695 error = EFAULT;
2696 goto fail;
2697 }
2698 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2699 sizeof(ioe->anchor)) {
2700 free(table, M_TEMP, sizeof(*table));
2701 free(ioe, M_TEMP, sizeof(*ioe));
2702 error = ENAMETOOLONG;
2703 goto fail;
2704 }
2705 NET_LOCK();
2706 PF_LOCK();
2707 switch (ioe->type) {
2708 case PF_TRANS_TABLE:
2709 memset(table, 0, sizeof(*table));
2710 strlcpy(table->pfrt_anchor, ioe->anchor,
2711 sizeof(table->pfrt_anchor));
2712 if ((error = pfr_ina_commit(table, ioe->ticket,
2713 NULL, NULL, 0))) {
2714 PF_UNLOCK();
2715 NET_UNLOCK();
2716 free(table, M_TEMP, sizeof(*table));
2717 free(ioe, M_TEMP, sizeof(*ioe));
2718 goto fail; /* really bad */
2719 }
2720 break;
2721 case PF_TRANS_RULESET:
2722 if ((error = pf_commit_rules(ioe->ticket,
2723 ioe->anchor))) {
2724 PF_UNLOCK();
2725 NET_UNLOCK();
2726 free(table, M_TEMP, sizeof(*table));
2727 free(ioe, M_TEMP, sizeof(*ioe));
2728 goto fail; /* really bad */
2729 }
2730 break;
2731 default:
2732 PF_UNLOCK();
2733 NET_UNLOCK();
2734 free(table, M_TEMP, sizeof(*table));
2735 free(ioe, M_TEMP, sizeof(*ioe));
2736 error = EINVAL;
2737 goto fail; /* really bad */
2738 }
2739 }
2740 for (i = 0; i < PF_LIMIT_MAX; i++) {
2741 if (pf_pool_limits[i].limit_new !=
2742 pf_pool_limits[i].limit &&
2743 pool_sethardlimit(pf_pool_limits[i].pp,
2744 pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2745 PF_UNLOCK();
2746 NET_UNLOCK();
2747 free(table, M_TEMP, sizeof(*table));
2748 free(ioe, M_TEMP, sizeof(*ioe));
2749 error = EBUSY;
2750 goto fail; /* really bad */
2751 }
2752 pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2753 }
2754 for (i = 0; i < PFTM_MAX; i++) {
2755 int old = pf_default_rule.timeout[i];
2756
2757 pf_default_rule.timeout[i] =
2758 pf_default_rule_new.timeout[i];
2759 if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2760 pf_default_rule.timeout[i] < old)
2761 task_add(net_tq(0), &pf_purge_task);
2762 }
2763 pfi_xcommit();
2764 pf_trans_set_commit();
2765 PF_UNLOCK();
2766 NET_UNLOCK();
2767 free(table, M_TEMP, sizeof(*table));
2768 free(ioe, M_TEMP, sizeof(*ioe));
2769 break;
2770 }
2771
2772 case DIOCGETSRCNODES: {
2773 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2774 struct pf_src_node *n, *p, *pstore;
2775 u_int32_t nr = 0;
2776 size_t space = psn->psn_len;
2777
2778 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2779
2780 NET_LOCK();
2781 PF_LOCK();
2782 if (space == 0) {
2783 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2784 nr++;
2785 psn->psn_len = sizeof(struct pf_src_node) * nr;
2786 PF_UNLOCK();
2787 NET_UNLOCK();
2788 free(pstore, M_TEMP, sizeof(*pstore));
2789 goto fail;
2790 }
2791
2792 p = psn->psn_src_nodes;
2793 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2794 int secs = getuptime(), diff;
2795
2796 if ((nr + 1) * sizeof(*p) > psn->psn_len)
2797 break;
2798
2799 memcpy(pstore, n, sizeof(*pstore));
2800 memset(&pstore->entry, 0, sizeof(pstore->entry));
2801 pstore->rule.ptr = NULL;
2802 pstore->kif = NULL;
2803 pstore->rule.nr = n->rule.ptr->nr;
2804 pstore->creation = secs - pstore->creation;
2805 if (pstore->expire > secs)
2806 pstore->expire -= secs;
2807 else
2808 pstore->expire = 0;
2809
2810 /* adjust the connection rate estimate */
2811 diff = secs - n->conn_rate.last;
2812 if (diff >= n->conn_rate.seconds)
2813 pstore->conn_rate.count = 0;
2814 else
2815 pstore->conn_rate.count -=
2816 n->conn_rate.count * diff /
2817 n->conn_rate.seconds;
2818
2819 error = copyout(pstore, p, sizeof(*p));
2820 if (error) {
2821 PF_UNLOCK();
2822 NET_UNLOCK();
2823 free(pstore, M_TEMP, sizeof(*pstore));
2824 goto fail;
2825 }
2826 p++;
2827 nr++;
2828 }
2829 psn->psn_len = sizeof(struct pf_src_node) * nr;
2830
2831 PF_UNLOCK();
2832 NET_UNLOCK();
2833 free(pstore, M_TEMP, sizeof(*pstore));
2834 break;
2835 }
2836
2837 case DIOCCLRSRCNODES: {
2838 struct pf_src_node *n;
2839 struct pf_state *st;
2840
2841 NET_LOCK();
2842 PF_LOCK();
2843 PF_STATE_ENTER_WRITE();
2844 RBT_FOREACH(st, pf_state_tree_id, &tree_id)
2845 pf_src_tree_remove_state(st);
2846 PF_STATE_EXIT_WRITE();
2847 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2848 n->expire = 1;
2849 pf_purge_expired_src_nodes();
2850 PF_UNLOCK();
2851 NET_UNLOCK();
2852 break;
2853 }
2854
2855 case DIOCKILLSRCNODES: {
2856 struct pf_src_node *sn;
2857 struct pf_state *st;
2858 struct pfioc_src_node_kill *psnk =
2859 (struct pfioc_src_node_kill *)addr;
2860 u_int killed = 0;
2861
2862 NET_LOCK();
2863 PF_LOCK();
2864 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2865 if (pf_match_addr(psnk->psnk_src.neg,
2866 &psnk->psnk_src.addr.v.a.addr,
2867 &psnk->psnk_src.addr.v.a.mask,
2868 &sn->addr, sn->af) &&
2869 pf_match_addr(psnk->psnk_dst.neg,
2870 &psnk->psnk_dst.addr.v.a.addr,
2871 &psnk->psnk_dst.addr.v.a.mask,
2872 &sn->raddr, sn->af)) {
2873 /* Handle state to src_node linkage */
2874 if (sn->states != 0) {
2875 PF_ASSERT_LOCKED();
2876 PF_STATE_ENTER_WRITE();
2877 RBT_FOREACH(st, pf_state_tree_id,
2878 &tree_id)
2879 pf_state_rm_src_node(st, sn);
2880 PF_STATE_EXIT_WRITE();
2881 }
2882 sn->expire = 1;
2883 killed++;
2884 }
2885 }
2886
2887 if (killed > 0)
2888 pf_purge_expired_src_nodes();
2889
2890 psnk->psnk_killed = killed;
2891 PF_UNLOCK();
2892 NET_UNLOCK();
2893 break;
2894 }
2895
2896 case DIOCSETHOSTID: {
2897 u_int32_t *hostid = (u_int32_t *)addr;
2898
2899 NET_LOCK();
2900 PF_LOCK();
2901 if (*hostid == 0)
2902 pf_trans_set.hostid = arc4random();
2903 else
2904 pf_trans_set.hostid = *hostid;
2905 pf_trans_set.mask |= PF_TSET_HOSTID;
2906 PF_UNLOCK();
2907 NET_UNLOCK();
2908 break;
2909 }
2910
2911 case DIOCOSFPFLUSH:
2912 pf_osfp_flush();
2913 break;
2914
2915 case DIOCIGETIFACES: {
2916 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2917 struct pfi_kif *kif_buf;
2918 int apfiio_size = io->pfiio_size;
2919
2920 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2921 error = ENODEV;
2922 goto fail;
2923 }
2924
2925 if ((kif_buf = mallocarray(sizeof(*kif_buf), apfiio_size,
2926 M_TEMP, M_WAITOK|M_CANFAIL)) == NULL) {
2927 error = EINVAL;
2928 goto fail;
2929 }
2930
2931 NET_LOCK();
2932 PF_LOCK();
2933 pfi_get_ifaces(io->pfiio_name, kif_buf, &io->pfiio_size);
2934 PF_UNLOCK();
2935 NET_UNLOCK();
2936 if (copyout(kif_buf, io->pfiio_buffer, sizeof(*kif_buf) *
2937 io->pfiio_size))
2938 error = EFAULT;
2939 free(kif_buf, M_TEMP, sizeof(*kif_buf) * apfiio_size);
2940 break;
2941 }
2942
2943 case DIOCSETIFFLAG: {
2944 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2945
2946 if (io == NULL) {
2947 error = EINVAL;
2948 goto fail;
2949 }
2950
2951 NET_LOCK();
2952 PF_LOCK();
2953 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2954 PF_UNLOCK();
2955 NET_UNLOCK();
2956 break;
2957 }
2958
2959 case DIOCCLRIFFLAG: {
2960 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2961
2962 if (io == NULL) {
2963 error = EINVAL;
2964 goto fail;
2965 }
2966
2967 NET_LOCK();
2968 PF_LOCK();
2969 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2970 PF_UNLOCK();
2971 NET_UNLOCK();
2972 break;
2973 }
2974
2975 case DIOCSETREASS: {
2976 u_int32_t *reass = (u_int32_t *)addr;
2977
2978 NET_LOCK();
2979 PF_LOCK();
2980 pf_trans_set.reass = *reass;
2981 pf_trans_set.mask |= PF_TSET_REASS;
2982 PF_UNLOCK();
2983 NET_UNLOCK();
2984 break;
2985 }
2986
2987 case DIOCSETSYNFLWATS: {
2988 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2989
2990 NET_LOCK();
2991 PF_LOCK();
2992 error = pf_syncookies_setwats(io->hiwat, io->lowat);
2993 PF_UNLOCK();
2994 NET_UNLOCK();
2995 break;
2996 }
2997
2998 case DIOCGETSYNFLWATS: {
2999 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
3000
3001 NET_LOCK();
3002 PF_LOCK();
3003 error = pf_syncookies_getwats(io);
3004 PF_UNLOCK();
3005 NET_UNLOCK();
3006 break;
3007 }
3008
3009 case DIOCSETSYNCOOKIES: {
3010 u_int8_t *mode = (u_int8_t *)addr;
3011
3012 NET_LOCK();
3013 PF_LOCK();
3014 error = pf_syncookies_setmode(*mode);
3015 PF_UNLOCK();
3016 NET_UNLOCK();
3017 break;
3018 }
3019
3020 default:
3021 error = ENODEV;
3022 break;
3023 }
3024 fail:
3025 if (flags & FWRITE)
3026 rw_exit_write(&pfioctl_rw);
3027 else
3028 rw_exit_read(&pfioctl_rw);
3029
3030 return (error);
3031 }
3032
/*
 * Commit the pending option changes collected in pf_trans_set to the
 * live pf_status.  Each field is guarded by its PF_TSET_* bit in
 * pf_trans_set.mask, so only options explicitly staged by an ioctl
 * handler (e.g. DIOCSETHOSTID, DIOCSETREASS set the bit together with
 * the value) are applied; all other status fields are left untouched.
 * Called from the DIOCXCOMMIT path with PF_LOCK held.
 */
void
pf_trans_set_commit(void)
{
	if (pf_trans_set.mask & PF_TSET_STATUSIF)
		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
	if (pf_trans_set.mask & PF_TSET_DEBUG)
		pf_status.debug = pf_trans_set.debug;
	if (pf_trans_set.mask & PF_TSET_HOSTID)
		pf_status.hostid = pf_trans_set.hostid;
	if (pf_trans_set.mask & PF_TSET_REASS)
		pf_status.reass = pf_trans_set.reass;
}
3045
3046 void
3047 pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
3048 {
3049 memmove(to, from, sizeof(*to));
3050 to->kif = NULL;
3051 to->addr.p.tbl = NULL;
3052 }
3053
3054 int
3055 pf_validate_range(u_int8_t op, u_int16_t port[2], int order)
3056 {
3057 u_int16_t a = (order == PF_ORDER_NET) ? ntohs(port[0]) : port[0];
3058 u_int16_t b = (order == PF_ORDER_NET) ? ntohs(port[1]) : port[1];
3059
3060 if ((op == PF_OP_RRG && a > b) || /* 34:12, i.e. none */
3061 (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
3062 (op == PF_OP_XRG && a > b)) /* 34<>22, i.e. all */
3063 return 1;
3064 return 0;
3065 }
3066
/*
 * Copy a rule handed in from userland (`from`) into a kernel-side
 * rule (`to`), validating it and resolving symbolic references.
 *
 * In order:
 *   - sanity checks: set_prio bounds, src/dst/rdr port ranges,
 *     rtableid and onrdomain validity;
 *   - field copies: labels, interface/queue/tag names, pools,
 *     limits, timeouts and the long tail of scalar options;
 *   - name resolution: pfi_kif_alloc() for the rule/rcv/rdr/nat/route
 *     interfaces (M_WAITOK — may sleep), pf_qname2qid() for queues,
 *     pf_tagname2tag() for tags.
 *
 * Returns 0 on success or an errno (EINVAL, EBUSY).  On failure `to`
 * may already hold partially-resolved references (kifs, tags) —
 * presumably released by the caller's error path; TODO confirm at the
 * call sites (not visible here).
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to)
{
	int i;

	/* set_prio values are interface queue priorities; bound them. */
	if (from->scrub_flags & PFSTATE_SETPRIO &&
	    (from->set_prio[0] > IFQ_MAXPRIO ||
	    from->set_prio[1] > IFQ_MAXPRIO))
		return (EINVAL);

	to->src = from->src;
	to->src.addr.p.tbl = NULL;	/* userland table pointer is invalid here */
	to->dst = from->dst;
	to->dst.addr.p.tbl = NULL;

	/* rule-body port bounds are stored in network byte order */
	if (pf_validate_range(to->src.port_op, to->src.port, PF_ORDER_NET))
		return (EINVAL);
	if (pf_validate_range(to->dst.port_op, to->dst.port, PF_ORDER_NET))
		return (EINVAL);

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* pools are copied verbatim with kernel pointers cleared */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	/* redirect proxy ports, by contrast, are in host byte order */
	if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port,
	    PF_ORDER_HOST))
		return (EINVAL);

	/* resolve interface names; M_WAITOK allocations may sleep */
	to->kif = (to->ifname[0]) ?
	    pfi_kif_alloc(to->ifname, M_WAITOK) : NULL;
	to->rcv_kif = (to->rcv_ifname[0]) ?
	    pfi_kif_alloc(to->rcv_ifname, M_WAITOK) : NULL;
	to->rdr.kif = (to->rdr.ifname[0]) ?
	    pfi_kif_alloc(to->rdr.ifname, M_WAITOK) : NULL;
	to->nat.kif = (to->nat.ifname[0]) ?
	    pfi_kif_alloc(to->nat.ifname, M_WAITOK) : NULL;
	to->route.kif = (to->route.ifname[0]) ?
	    pfi_kif_alloc(to->route.ifname, M_WAITOK) : NULL;

	to->os_fingerprint = from->os_fingerprint;

	to->rtableid = from->rtableid;
	/* a requested routing table must already exist */
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain != -1 && (to->onrdomain < 0 ||
	    to->onrdomain > RT_TABLEID_MAX))
		return (EINVAL);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
	pf_init_threshold(&to->pktrate, from->pktrate.limit,
	    from->pktrate.seconds);

	/*
	 * Resolve queue names to ids (0 means lookup failure); when no
	 * separate priority-queue name is given, pqid falls back to qid.
	 */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;
	/* resolve tag names to ids; 0 signals failure */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->delay = from->delay;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* a non-logging rule gets no pflog interface index */
	if (!to->log)
		to->logif = 0;
#endif /* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative; /* XXX */
	to->anchor_wildcard = from->anchor_wildcard; /* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert.type = from->divert.type;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}
3204
3205 int
3206 pf_rule_checkaf(struct pf_rule *r)
3207 {
3208 switch (r->af) {
3209 case 0:
3210 if (r->rule_flag & PFRULE_AFTO)
3211 return (EPFNOSUPPORT);
3212 break;
3213 case AF_INET:
3214 if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
3215 return (EPFNOSUPPORT);
3216 break;
3217 #ifdef INET6
3218 case AF_INET6:
3219 if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
3220 return (EPFNOSUPPORT);
3221 break;
3222 #endif /* INET6 */
3223 default:
3224 return (EPFNOSUPPORT);
3225 }
3226
3227 if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
3228 return (EPFNOSUPPORT);
3229
3230 return (0);
3231 }
3232
3233 int
3234 pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3235 {
3236 struct pf_status pfs;
3237
3238 NET_LOCK_SHARED();
3239 PF_LOCK();
3240 memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3241 pfi_update_status(pfs.ifname, &pfs);
3242 PF_UNLOCK();
3243 NET_UNLOCK_SHARED();
3244
3245 return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3246 }
Cache object: c7f24d94a43133bd692dcb758ed9fb15
|