1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/socket.h>
48 #include <vm/uma.h>
49
50 #include <net/if.h>
51 #include <net/vnet.h>
52 #include <net/pfvar.h>
53
54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
55
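/* Return EINVAL if any PFR_FLAG_* bit outside 'oklist' was passed in by the caller. */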
56 #define ACCEPT_FLAGS(flags, oklist) \
57 do { \
58 if ((flags & ~(oklist)) & \
59 PFR_FLAG_ALLMASK) \
60 return (EINVAL); \
61 } while (0)
62
63 #define FILLIN_SIN(sin, addr) \
64 do { \
65 (sin).sin_len = sizeof(sin); \
66 (sin).sin_family = AF_INET; \
67 (sin).sin_addr = (addr); \
68 } while (0)
69
70 #define FILLIN_SIN6(sin6, addr) \
71 do { \
72 (sin6).sin6_len = sizeof(sin6); \
73 (sin6).sin6_family = AF_INET6; \
74 (sin6).sin6_addr = (addr); \
75 } while (0)
76
77 #define SWAP(type, a1, a2) \
78 do { \
79 type tmp = a1; \
80 a1 = a2; \
81 a2 = tmp; \
82 } while (0)
83
84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
85 (struct pf_addr *)&(su)->sin.sin_addr : \
86 (struct pf_addr *)&(su)->sin6.sin6_addr)
87
88 #define AF_BITS(af) (((af)==AF_INET)?32:128)
89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
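/* True for the radix head's internal root nodes, which are not real table entries and must be skipped. */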
91 #define KENTRY_RNF_ROOT(ke) \
92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
93
94 #define NO_ADDRESSES (-1)
95 #define ENQUEUE_UNMARKED_ONLY (1)
96 #define INVERT_NEG_FLAG (1)
97
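/* Per-walk state handed to pfr_walktree(); pfrw_op selects what is done with each radix node visited. */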
98 struct pfr_walktree {
99 enum pfrw_op {
100 PFRW_MARK,
101 PFRW_SWEEP,
102 PFRW_ENQUEUE,
103 PFRW_GET_ADDRS,
104 PFRW_GET_ASTATS,
105 PFRW_POOL_GET,
106 PFRW_DYNADDR_UPDATE,
107 PFRW_COUNTERS
108 } pfrw_op;
109 union {
110 struct pfr_addr *pfrw1_addr;
111 struct pfr_astats *pfrw1_astats;
112 struct pfr_kentryworkq *pfrw1_workq;
113 struct pfr_kentry *pfrw1_kentry;
114 struct pfi_dynaddr *pfrw1_dyn;
115 } pfrw_1;
116 int pfrw_free;
117 int pfrw_flags;
118 };
119 #define pfrw_addr pfrw_1.pfrw1_addr
120 #define pfrw_astats pfrw_1.pfrw1_astats
121 #define pfrw_workq pfrw_1.pfrw1_workq
122 #define pfrw_kentry pfrw_1.pfrw1_kentry
123 #define pfrw_dyn pfrw_1.pfrw1_dyn
124 #define pfrw_cnt pfrw_free
125
126 #define senderr(e) do { rv = (e); goto _bad; } while (0)
127
128 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
129 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
130 #define V_pfr_kentry_z VNET(pfr_kentry_z)
131 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
132 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z)
133
134 static struct pf_addr pfr_ffaddr = {
135 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
136 };
137
138 static void pfr_copyout_astats(struct pfr_astats *,
139 const struct pfr_kentry *,
140 const struct pfr_walktree *);
141 static void pfr_copyout_addr(struct pfr_addr *,
142 const struct pfr_kentry *ke);
143 static int pfr_validate_addr(struct pfr_addr *);
144 static void pfr_enqueue_addrs(struct pfr_ktable *,
145 struct pfr_kentryworkq *, int *, int);
146 static void pfr_mark_addrs(struct pfr_ktable *);
147 static struct pfr_kentry
148 *pfr_lookup_addr(struct pfr_ktable *,
149 struct pfr_addr *, int);
150 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
151 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
152 static void pfr_destroy_kentry(struct pfr_kentry *);
153 static void pfr_insert_kentries(struct pfr_ktable *,
154 struct pfr_kentryworkq *, long);
155 static void pfr_remove_kentries(struct pfr_ktable *,
156 struct pfr_kentryworkq *);
157 static void pfr_clstats_kentries(struct pfr_ktable *,
158 struct pfr_kentryworkq *, long, int);
159 static void pfr_reset_feedback(struct pfr_addr *, int);
160 static void pfr_prepare_network(union sockaddr_union *, int, int);
161 static int pfr_route_kentry(struct pfr_ktable *,
162 struct pfr_kentry *);
163 static int pfr_unroute_kentry(struct pfr_ktable *,
164 struct pfr_kentry *);
165 static int pfr_walktree(struct radix_node *, void *);
166 static int pfr_validate_table(struct pfr_table *, int, int);
167 static int pfr_fix_anchor(char *);
168 static void pfr_commit_ktable(struct pfr_ktable *, long);
169 static void pfr_insert_ktables(struct pfr_ktableworkq *);
170 static void pfr_insert_ktable(struct pfr_ktable *);
171 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
172 static void pfr_setflags_ktable(struct pfr_ktable *, int);
173 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
174 int);
175 static void pfr_clstats_ktable(struct pfr_ktable *, long, int);
176 static struct pfr_ktable
177 *pfr_create_ktable(struct pfr_table *, long, int);
178 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
179 static void pfr_destroy_ktable(struct pfr_ktable *, int);
180 static int pfr_ktable_compare(struct pfr_ktable *,
181 struct pfr_ktable *);
182 static struct pfr_ktable
183 *pfr_lookup_table(struct pfr_table *);
184 static void pfr_clean_node_mask(struct pfr_ktable *,
185 struct pfr_kentryworkq *);
186 static int pfr_skip_table(struct pfr_table *,
187 struct pfr_ktable *, int);
188 static struct pfr_kentry
189 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
190
191 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
192 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
193
194 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
195 #define V_pfr_ktables VNET(pfr_ktables)
196
197 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
198 #define V_pfr_nulltable VNET(pfr_nulltable)
199
200 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
201 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
202
203 void
204 pfr_initialize(void)
205 {
206
207 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
208 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
209 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
210 V_pfr_kentry_z = uma_zcreate("pf table entries",
211 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
212 0);
213 uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
214 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
215 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
216 }
217
218 void
219 pfr_cleanup(void)
220 {
221
222 uma_zdestroy(V_pfr_kentry_z);
223 uma_zdestroy(V_pfr_kentry_counter_z);
224 }
225
226 int
227 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
228 {
229 struct pfr_ktable *kt;
230 struct pfr_kentryworkq workq;
231
232 PF_RULES_WASSERT();
233
234 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
235 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
236 return (EINVAL);
237 kt = pfr_lookup_table(tbl);
238 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
239 return (ESRCH);
240 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
241 return (EPERM);
242 pfr_enqueue_addrs(kt, &workq, ndel, 0);
243
244 if (!(flags & PFR_FLAG_DUMMY)) {
245 pfr_remove_kentries(kt, &workq);
246 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-zero pfrkt_cnt", __func__));
247 }
248 return (0);
249 }
250
251 int
252 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
253 int *nadd, int flags)
254 {
255 struct pfr_ktable *kt, *tmpkt;
256 struct pfr_kentryworkq workq;
257 struct pfr_kentry *p, *q;
258 struct pfr_addr *ad;
259 int i, rv, xadd = 0;
260 long tzero = time_second;
261
262 PF_RULES_WASSERT();
263
264 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
265 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
266 return (EINVAL);
267 kt = pfr_lookup_table(tbl);
268 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
269 return (ESRCH);
270 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
271 return (EPERM);
272 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
273 if (tmpkt == NULL)
274 return (ENOMEM);
275 SLIST_INIT(&workq);
276 for (i = 0, ad = addr; i < size; i++, ad++) {
277 if (pfr_validate_addr(ad))
278 senderr(EINVAL);
279 p = pfr_lookup_addr(kt, ad, 1);
280 q = pfr_lookup_addr(tmpkt, ad, 1);
281 if (flags & PFR_FLAG_FEEDBACK) {
282 if (q != NULL)
283 ad->pfra_fback = PFR_FB_DUPLICATE;
284 else if (p == NULL)
285 ad->pfra_fback = PFR_FB_ADDED;
286 else if (p->pfrke_not != ad->pfra_not)
287 ad->pfra_fback = PFR_FB_CONFLICT;
288 else
289 ad->pfra_fback = PFR_FB_NONE;
290 }
291 if (p == NULL && q == NULL) {
292 p = pfr_create_kentry(ad,
293 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
294 if (p == NULL)
295 senderr(ENOMEM);
296 if (pfr_route_kentry(tmpkt, p)) {
297 pfr_destroy_kentry(p);
298 ad->pfra_fback = PFR_FB_NONE;
299 } else {
300 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
301 xadd++;
302 }
303 }
304 }
305 pfr_clean_node_mask(tmpkt, &workq);
306 if (!(flags & PFR_FLAG_DUMMY))
307 pfr_insert_kentries(kt, &workq, tzero);
308 else
309 pfr_destroy_kentries(&workq);
310 if (nadd != NULL)
311 *nadd = xadd;
312 pfr_destroy_ktable(tmpkt, 0);
313 return (0);
314 _bad:
315 pfr_clean_node_mask(tmpkt, &workq);
316 pfr_destroy_kentries(&workq);
317 if (flags & PFR_FLAG_FEEDBACK)
318 pfr_reset_feedback(addr, size);
319 pfr_destroy_ktable(tmpkt, 0);
320 return (rv);
321 }
322
323 int
324 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
325 int *ndel, int flags)
326 {
327 struct pfr_ktable *kt;
328 struct pfr_kentryworkq workq;
329 struct pfr_kentry *p;
330 struct pfr_addr *ad;
331 int i, rv, xdel = 0, log = 1;
332
333 PF_RULES_WASSERT();
334
335 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
336 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
337 return (EINVAL);
338 kt = pfr_lookup_table(tbl);
339 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
340 return (ESRCH);
341 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
342 return (EPERM);
343 /*
344 * There are two algorithms to choose from here.
345 * With:
346 * n: number of addresses to delete
347 * N: number of addresses in the table
348 *
349 * one is O(N) and is better for large 'n',
350 * one is O(n*LOG(N)) and is better for small 'n'.
351 *
352 * The following code tries to decide which one is best.
353 */
354 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
355 log++;
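/* log is now roughly log2(pfrkt_cnt) + 1; the full O(N) scan is chosen once n * log(N) exceeds N. */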
356 if (size > kt->pfrkt_cnt/log) {
357 /* full table scan */
358 pfr_mark_addrs(kt);
359 } else {
360 /* iterate over addresses to delete */
361 for (i = 0, ad = addr; i < size; i++, ad++) {
362 if (pfr_validate_addr(ad))
363 return (EINVAL);
364 p = pfr_lookup_addr(kt, ad, 1);
365 if (p != NULL)
366 p->pfrke_mark = 0;
367 }
368 }
369 SLIST_INIT(&workq);
370 for (i = 0, ad = addr; i < size; i++, ad++) {
371 if (pfr_validate_addr(ad))
372 senderr(EINVAL);
373 p = pfr_lookup_addr(kt, ad, 1);
374 if (flags & PFR_FLAG_FEEDBACK) {
375 if (p == NULL)
376 ad->pfra_fback = PFR_FB_NONE;
377 else if (p->pfrke_not != ad->pfra_not)
378 ad->pfra_fback = PFR_FB_CONFLICT;
379 else if (p->pfrke_mark)
380 ad->pfra_fback = PFR_FB_DUPLICATE;
381 else
382 ad->pfra_fback = PFR_FB_DELETED;
383 }
384 if (p != NULL && p->pfrke_not == ad->pfra_not &&
385 !p->pfrke_mark) {
386 p->pfrke_mark = 1;
387 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
388 xdel++;
389 }
390 }
391 if (!(flags & PFR_FLAG_DUMMY))
392 pfr_remove_kentries(kt, &workq);
393 if (ndel != NULL)
394 *ndel = xdel;
395 return (0);
396 _bad:
397 if (flags & PFR_FLAG_FEEDBACK)
398 pfr_reset_feedback(addr, size);
399 return (rv);
400 }
401
402 int
403 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
404 int *size2, int *nadd, int *ndel, int *nchange, int flags,
405 u_int32_t ignore_pfrt_flags)
406 {
407 struct pfr_ktable *kt, *tmpkt;
408 struct pfr_kentryworkq addq, delq, changeq;
409 struct pfr_kentry *p, *q;
410 struct pfr_addr ad;
411 int i, rv, xadd = 0, xdel = 0, xchange = 0;
412 long tzero = time_second;
413
414 PF_RULES_WASSERT();
415
416 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
417 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
418 PFR_FLAG_USERIOCTL))
419 return (EINVAL);
420 kt = pfr_lookup_table(tbl);
421 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
422 return (ESRCH);
423 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
424 return (EPERM);
425 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
426 if (tmpkt == NULL)
427 return (ENOMEM);
428 pfr_mark_addrs(kt);
429 SLIST_INIT(&addq);
430 SLIST_INIT(&delq);
431 SLIST_INIT(&changeq);
432 for (i = 0; i < size; i++) {
433 /*
434 * XXXGL: understand pf_if usage of this function
435 * and make ad a moving pointer.
436 */
437 bcopy(addr + i, &ad, sizeof(ad));
438 if (pfr_validate_addr(&ad))
439 senderr(EINVAL);
440 ad.pfra_fback = PFR_FB_NONE;
441 p = pfr_lookup_addr(kt, &ad, 1);
442 if (p != NULL) {
443 if (p->pfrke_mark) {
444 ad.pfra_fback = PFR_FB_DUPLICATE;
445 goto _skip;
446 }
447 p->pfrke_mark = 1;
448 if (p->pfrke_not != ad.pfra_not) {
449 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
450 ad.pfra_fback = PFR_FB_CHANGED;
451 xchange++;
452 }
453 } else {
454 q = pfr_lookup_addr(tmpkt, &ad, 1);
455 if (q != NULL) {
456 ad.pfra_fback = PFR_FB_DUPLICATE;
457 goto _skip;
458 }
459 p = pfr_create_kentry(&ad,
460 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
461 if (p == NULL)
462 senderr(ENOMEM);
463 if (pfr_route_kentry(tmpkt, p)) {
464 pfr_destroy_kentry(p);
465 ad.pfra_fback = PFR_FB_NONE;
466 } else {
467 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
468 ad.pfra_fback = PFR_FB_ADDED;
469 xadd++;
470 }
471 }
472 _skip:
473 if (flags & PFR_FLAG_FEEDBACK)
474 bcopy(&ad, addr + i, sizeof(ad));
475 }
476 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
477 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
478 if (*size2 < size+xdel) {
479 *size2 = size+xdel;
480 senderr(0);
481 }
482 i = 0;
483 SLIST_FOREACH(p, &delq, pfrke_workq) {
484 pfr_copyout_addr(&ad, p);
485 ad.pfra_fback = PFR_FB_DELETED;
486 bcopy(&ad, addr + size + i, sizeof(ad));
487 i++;
488 }
489 }
490 pfr_clean_node_mask(tmpkt, &addq);
491 if (!(flags & PFR_FLAG_DUMMY)) {
492 pfr_insert_kentries(kt, &addq, tzero);
493 pfr_remove_kentries(kt, &delq);
494 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
495 } else
496 pfr_destroy_kentries(&addq);
497 if (nadd != NULL)
498 *nadd = xadd;
499 if (ndel != NULL)
500 *ndel = xdel;
501 if (nchange != NULL)
502 *nchange = xchange;
503 if ((flags & PFR_FLAG_FEEDBACK) && size2)
504 *size2 = size+xdel;
505 pfr_destroy_ktable(tmpkt, 0);
506 return (0);
507 _bad:
508 pfr_clean_node_mask(tmpkt, &addq);
509 pfr_destroy_kentries(&addq);
510 if (flags & PFR_FLAG_FEEDBACK)
511 pfr_reset_feedback(addr, size);
512 pfr_destroy_ktable(tmpkt, 0);
513 return (rv);
514 }
515
516 int
517 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
518 int *nmatch, int flags)
519 {
520 struct pfr_ktable *kt;
521 struct pfr_kentry *p;
522 struct pfr_addr *ad;
523 int i, xmatch = 0;
524
525 PF_RULES_RASSERT();
526
527 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
528 if (pfr_validate_table(tbl, 0, 0))
529 return (EINVAL);
530 kt = pfr_lookup_table(tbl);
531 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
532 return (ESRCH);
533
534 for (i = 0, ad = addr; i < size; i++, ad++) {
535 if (pfr_validate_addr(ad))
536 return (EINVAL);
537 if (ADDR_NETWORK(ad))
538 return (EINVAL);
539 p = pfr_lookup_addr(kt, ad, 0);
540 if (flags & PFR_FLAG_REPLACE)
541 pfr_copyout_addr(ad, p);
542 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
543 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
544 if (p != NULL && !p->pfrke_not)
545 xmatch++;
546 }
547 if (nmatch != NULL)
548 *nmatch = xmatch;
549 return (0);
550 }
551
552 int
553 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
554 int flags)
555 {
556 struct pfr_ktable *kt;
557 struct pfr_walktree w;
558 int rv;
559
560 PF_RULES_RASSERT();
561
562 ACCEPT_FLAGS(flags, 0);
563 if (pfr_validate_table(tbl, 0, 0))
564 return (EINVAL);
565 kt = pfr_lookup_table(tbl);
566 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
567 return (ESRCH);
568 if (kt->pfrkt_cnt > *size) {
569 *size = kt->pfrkt_cnt;
570 return (0);
571 }
572
573 bzero(&w, sizeof(w));
574 w.pfrw_op = PFRW_GET_ADDRS;
575 w.pfrw_addr = addr;
576 w.pfrw_free = kt->pfrkt_cnt;
577 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
578 if (!rv)
579 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
580 pfr_walktree, &w);
581 if (rv)
582 return (rv);
583
584 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
585 w.pfrw_free));
586
587 *size = kt->pfrkt_cnt;
588 return (0);
589 }
590
591 int
592 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
593 int flags)
594 {
595 struct pfr_ktable *kt;
596 struct pfr_walktree w;
597 struct pfr_kentryworkq workq;
598 int rv;
599 long tzero = time_second;
600
601 PF_RULES_RASSERT();
602
603 /* XXX PFR_FLAG_CLSTATS disabled */
604 ACCEPT_FLAGS(flags, 0);
605 if (pfr_validate_table(tbl, 0, 0))
606 return (EINVAL);
607 kt = pfr_lookup_table(tbl);
608 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
609 return (ESRCH);
610 if (kt->pfrkt_cnt > *size) {
611 *size = kt->pfrkt_cnt;
612 return (0);
613 }
614
615 bzero(&w, sizeof(w));
616 w.pfrw_op = PFRW_GET_ASTATS;
617 w.pfrw_astats = addr;
618 w.pfrw_free = kt->pfrkt_cnt;
619 /*
620 * Flags below are for backward compatibility. It used to be possible
621 * to have a table without per-entry counters. Now they are always
622 * allocated; we just discard the data when reading it if the table is
623 * not configured to have counters.
624 */
625 w.pfrw_flags = kt->pfrkt_flags;
626 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
627 if (!rv)
628 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
629 pfr_walktree, &w);
630 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
631 pfr_enqueue_addrs(kt, &workq, NULL, 0);
632 pfr_clstats_kentries(kt, &workq, tzero, 0);
633 }
634 if (rv)
635 return (rv);
636
637 if (w.pfrw_free) {
638 printf("pfr_get_astats: corruption detected (%d).\n",
639 w.pfrw_free);
640 return (ENOTTY);
641 }
642 *size = kt->pfrkt_cnt;
643 return (0);
644 }
645
646 int
647 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
648 int *nzero, int flags)
649 {
650 struct pfr_ktable *kt;
651 struct pfr_kentryworkq workq;
652 struct pfr_kentry *p;
653 struct pfr_addr *ad;
654 int i, rv, xzero = 0;
655
656 PF_RULES_WASSERT();
657
658 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
659 if (pfr_validate_table(tbl, 0, 0))
660 return (EINVAL);
661 kt = pfr_lookup_table(tbl);
662 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
663 return (ESRCH);
664 SLIST_INIT(&workq);
665 for (i = 0, ad = addr; i < size; i++, ad++) {
666 if (pfr_validate_addr(ad))
667 senderr(EINVAL);
668 p = pfr_lookup_addr(kt, ad, 1);
669 if (flags & PFR_FLAG_FEEDBACK) {
670 ad->pfra_fback = (p != NULL) ?
671 PFR_FB_CLEARED : PFR_FB_NONE;
672 }
673 if (p != NULL) {
674 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
675 xzero++;
676 }
677 }
678
679 if (!(flags & PFR_FLAG_DUMMY))
680 pfr_clstats_kentries(kt, &workq, 0, 0);
681 if (nzero != NULL)
682 *nzero = xzero;
683 return (0);
684 _bad:
685 if (flags & PFR_FLAG_FEEDBACK)
686 pfr_reset_feedback(addr, size);
687 return (rv);
688 }
689
690 static int
691 pfr_validate_addr(struct pfr_addr *ad)
692 {
693 int i;
694
695 switch (ad->pfra_af) {
696 #ifdef INET
697 case AF_INET:
698 if (ad->pfra_net > 32)
699 return (-1);
700 break;
701 #endif /* INET */
702 #ifdef INET6
703 case AF_INET6:
704 if (ad->pfra_net > 128)
705 return (-1);
706 break;
707 #endif /* INET6 */
708 default:
709 return (-1);
710 }
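/* Host bits beyond the prefix length, and the rest of the address union, must be zero. */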
711 if (ad->pfra_net < 128 &&
712 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
713 return (-1);
714 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
715 if (((caddr_t)ad)[i])
716 return (-1);
717 if (ad->pfra_not && ad->pfra_not != 1)
718 return (-1);
719 if (ad->pfra_fback)
720 return (-1);
721 return (0);
722 }
723
724 static void
725 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
726 int *naddr, int sweep)
727 {
728 struct pfr_walktree w;
729
730 SLIST_INIT(workq);
731 bzero(&w, sizeof(w));
732 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
733 w.pfrw_workq = workq;
734 if (kt->pfrkt_ip4 != NULL)
735 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
736 pfr_walktree, &w))
737 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
738 if (kt->pfrkt_ip6 != NULL)
739 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
740 pfr_walktree, &w))
741 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
742 if (naddr != NULL)
743 *naddr = w.pfrw_cnt;
744 }
745
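/*
 * Clear the mark on every entry; a later sweep (ENQUEUE_UNMARKED_ONLY)
 * collects the entries that were never re-marked.
 */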
746 static void
747 pfr_mark_addrs(struct pfr_ktable *kt)
748 {
749 struct pfr_walktree w;
750
751 bzero(&w, sizeof(w));
752 w.pfrw_op = PFRW_MARK;
753 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
754 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
755 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
756 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
757 }
758
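/*
 * Look up an address in the table's radix trees.  Network addresses are
 * matched exactly against their mask; host addresses use longest-prefix
 * match, and with 'exact' set a network entry is not accepted as a match.
 */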
759 static struct pfr_kentry *
760 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
761 {
762 union sockaddr_union sa, mask;
763 struct radix_head *head = NULL;
764 struct pfr_kentry *ke;
765
766 PF_RULES_ASSERT();
767
768 bzero(&sa, sizeof(sa));
769 if (ad->pfra_af == AF_INET) {
770 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
771 head = &kt->pfrkt_ip4->rh;
772 } else if (ad->pfra_af == AF_INET6) {
773 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
774 head = &kt->pfrkt_ip6->rh;
775 }
776 if (ADDR_NETWORK(ad)) {
777 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
778 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
779 if (ke && KENTRY_RNF_ROOT(ke))
780 ke = NULL;
781 } else {
782 ke = (struct pfr_kentry *)rn_match(&sa, head);
783 if (ke && KENTRY_RNF_ROOT(ke))
784 ke = NULL;
785 if (exact && ke && KENTRY_NETWORK(ke))
786 ke = NULL;
787 }
788 return (ke);
789 }
790
791 static struct pfr_kentry *
792 pfr_create_kentry(struct pfr_addr *ad, bool counters)
793 {
794 struct pfr_kentry *ke;
795 counter_u64_t c;
796
797 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
798 if (ke == NULL)
799 return (NULL);
800
801 if (ad->pfra_af == AF_INET)
802 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
803 else if (ad->pfra_af == AF_INET6)
804 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
805 ke->pfrke_af = ad->pfra_af;
806 ke->pfrke_net = ad->pfra_net;
807 ke->pfrke_not = ad->pfra_not;
808 ke->pfrke_counters.pfrkc_tzero = 0;
809 if (counters) {
810 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
811 if (c == NULL) {
812 pfr_destroy_kentry(ke);
813 return (NULL);
814 }
815 ke->pfrke_counters.pfrkc_counters = c;
816 }
817 return (ke);
818 }
819
820 static void
821 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
822 {
823 struct pfr_kentry *p, *q;
824
825 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
826 q = SLIST_NEXT(p, pfrke_workq);
827 pfr_destroy_kentry(p);
828 }
829 }
830
831 static void
832 pfr_destroy_kentry(struct pfr_kentry *ke)
833 {
834 counter_u64_t c;
835
836 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
837 uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
838 uma_zfree(V_pfr_kentry_z, ke);
839 }
840
841 static void
842 pfr_insert_kentries(struct pfr_ktable *kt,
843 struct pfr_kentryworkq *workq, long tzero)
844 {
845 struct pfr_kentry *p;
846 int rv, n = 0;
847
848 SLIST_FOREACH(p, workq, pfrke_workq) {
849 rv = pfr_route_kentry(kt, p);
850 if (rv) {
851 printf("pfr_insert_kentries: cannot route entry "
852 "(code=%d).\n", rv);
853 break;
854 }
855 p->pfrke_counters.pfrkc_tzero = tzero;
856 n++;
857 }
858 kt->pfrkt_cnt += n;
859 }
860
861 int
862 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
863 {
864 struct pfr_kentry *p;
865 int rv;
866
867 p = pfr_lookup_addr(kt, ad, 1);
868 if (p != NULL)
869 return (0);
870 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
871 if (p == NULL)
872 return (ENOMEM);
873
874 rv = pfr_route_kentry(kt, p);
875 if (rv)
876 return (rv);
877
878 p->pfrke_counters.pfrkc_tzero = tzero;
879 kt->pfrkt_cnt++;
880
881 return (0);
882 }
883
884 static void
885 pfr_remove_kentries(struct pfr_ktable *kt,
886 struct pfr_kentryworkq *workq)
887 {
888 struct pfr_kentry *p;
889 int n = 0;
890
891 SLIST_FOREACH(p, workq, pfrke_workq) {
892 pfr_unroute_kentry(kt, p);
893 n++;
894 }
895 kt->pfrkt_cnt -= n;
896 pfr_destroy_kentries(workq);
897 }
898
899 static void
900 pfr_clean_node_mask(struct pfr_ktable *kt,
901 struct pfr_kentryworkq *workq)
902 {
903 struct pfr_kentry *p;
904
905 SLIST_FOREACH(p, workq, pfrke_workq)
906 pfr_unroute_kentry(kt, p);
907 }
908
909 static void
910 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
911 long tzero, int negchange)
912 {
913 struct pfr_kentry *p;
914 int i;
915
916 SLIST_FOREACH(p, workq, pfrke_workq) {
917 if (negchange)
918 p->pfrke_not = !p->pfrke_not;
919 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
920 for (i = 0; i < PFR_NUM_COUNTERS; i++)
921 counter_u64_zero(
922 p->pfrke_counters.pfrkc_counters + i);
923 p->pfrke_counters.pfrkc_tzero = tzero;
924 }
925 }
926
927 static void
928 pfr_reset_feedback(struct pfr_addr *addr, int size)
929 {
930 struct pfr_addr *ad;
931 int i;
932
933 for (i = 0, ad = addr; i < size; i++, ad++)
934 ad->pfra_fback = PFR_FB_NONE;
935 }
936
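/* Build a sockaddr netmask with the first 'net' bits set for the given address family. */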
937 static void
938 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
939 {
940 int i;
941
942 bzero(sa, sizeof(*sa));
943 if (af == AF_INET) {
944 sa->sin.sin_len = sizeof(sa->sin);
945 sa->sin.sin_family = AF_INET;
946 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
947 } else if (af == AF_INET6) {
948 sa->sin6.sin6_len = sizeof(sa->sin6);
949 sa->sin6.sin6_family = AF_INET6;
950 for (i = 0; i < 4; i++) {
951 if (net <= 32) {
952 sa->sin6.sin6_addr.s6_addr32[i] =
953 net ? htonl(-1 << (32-net)) : 0;
954 break;
955 }
956 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
957 net -= 32;
958 }
959 }
960 }
961
962 static int
963 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
964 {
965 union sockaddr_union mask;
966 struct radix_node *rn;
967 struct radix_head *head = NULL;
968
969 PF_RULES_WASSERT();
970
971 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
972 if (ke->pfrke_af == AF_INET)
973 head = &kt->pfrkt_ip4->rh;
974 else if (ke->pfrke_af == AF_INET6)
975 head = &kt->pfrkt_ip6->rh;
976
977 if (KENTRY_NETWORK(ke)) {
978 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
979 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
980 } else
981 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
982
983 return (rn == NULL ? -1 : 0);
984 }
985
986 static int
987 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
988 {
989 union sockaddr_union mask;
990 struct radix_node *rn;
991 struct radix_head *head = NULL;
992
993 if (ke->pfrke_af == AF_INET)
994 head = &kt->pfrkt_ip4->rh;
995 else if (ke->pfrke_af == AF_INET6)
996 head = &kt->pfrkt_ip6->rh;
997
998 if (KENTRY_NETWORK(ke)) {
999 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1000 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1001 } else
1002 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1003
1004 if (rn == NULL) {
1005 printf("pfr_unroute_kentry: delete failed.\n");
1006 return (-1);
1007 }
1008 return (0);
1009 }
1010
1011 static void
1012 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1013 {
1014 bzero(ad, sizeof(*ad));
1015 if (ke == NULL)
1016 return;
1017 ad->pfra_af = ke->pfrke_af;
1018 ad->pfra_net = ke->pfrke_net;
1019 ad->pfra_not = ke->pfrke_not;
1020 if (ad->pfra_af == AF_INET)
1021 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1022 else if (ad->pfra_af == AF_INET6)
1023 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1024 }
1025
1026 static void
1027 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1028 const struct pfr_walktree *w)
1029 {
1030 int dir, op;
1031 const struct pfr_kcounters *kc = &ke->pfrke_counters;
1032
1033 bzero(as, sizeof(*as));
1034 pfr_copyout_addr(&as->pfras_a, ke);
1035 as->pfras_tzero = kc->pfrkc_tzero;
1036
1037 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
1038 kc->pfrkc_counters == NULL) {
1039 bzero(as->pfras_packets, sizeof(as->pfras_packets));
1040 bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1041 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1042 return;
1043 }
1044
1045 for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1046 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1047 as->pfras_packets[dir][op] = counter_u64_fetch(
1048 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1049 as->pfras_bytes[dir][op] = counter_u64_fetch(
1050 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1051 }
1052 }
1053 }
1054
1055 static int
1056 pfr_walktree(struct radix_node *rn, void *arg)
1057 {
1058 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1059 struct pfr_walktree *w = arg;
1060
1061 switch (w->pfrw_op) {
1062 case PFRW_MARK:
1063 ke->pfrke_mark = 0;
1064 break;
1065 case PFRW_SWEEP:
1066 if (ke->pfrke_mark)
1067 break;
1068 /* FALLTHROUGH */
1069 case PFRW_ENQUEUE:
1070 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1071 w->pfrw_cnt++;
1072 break;
1073 case PFRW_GET_ADDRS:
1074 if (w->pfrw_free-- > 0) {
1075 pfr_copyout_addr(w->pfrw_addr, ke);
1076 w->pfrw_addr++;
1077 }
1078 break;
1079 case PFRW_GET_ASTATS:
1080 if (w->pfrw_free-- > 0) {
1081 struct pfr_astats as;
1082
1083 pfr_copyout_astats(&as, ke, w);
1084
1085 bcopy(&as, w->pfrw_astats, sizeof(as));
1086 w->pfrw_astats++;
1087 }
1088 break;
1089 case PFRW_POOL_GET:
1090 if (ke->pfrke_not)
1091 break; /* negative entries are ignored */
1092 if (!w->pfrw_cnt--) {
1093 w->pfrw_kentry = ke;
1094 return (1); /* finish search */
1095 }
1096 break;
1097 case PFRW_DYNADDR_UPDATE:
1098 {
1099 union sockaddr_union pfr_mask;
1100
1101 if (ke->pfrke_af == AF_INET) {
1102 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1103 break;
1104 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1105 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1106 AF_INET);
1107 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1108 AF_INET);
1109 } else if (ke->pfrke_af == AF_INET6) {
1110 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1111 break;
1112 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1113 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1114 AF_INET6);
1115 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1116 AF_INET6);
1117 }
1118 break;
1119 }
1120 case PFRW_COUNTERS:
1121 {
1122 if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
1123 if (ke->pfrke_counters.pfrkc_counters != NULL)
1124 break;
1125 ke->pfrke_counters.pfrkc_counters =
1126 uma_zalloc_pcpu(V_pfr_kentry_counter_z,
1127 M_NOWAIT | M_ZERO);
1128 } else {
1129 uma_zfree_pcpu(V_pfr_kentry_counter_z,
1130 ke->pfrke_counters.pfrkc_counters);
1131 ke->pfrke_counters.pfrkc_counters = NULL;
1132 }
1133 break;
1134 }
1135 }
1136 return (0);
1137 }
1138
1139 int
1140 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1141 {
1142 struct pfr_ktableworkq workq;
1143 struct pfr_ktable *p;
1144 int xdel = 0;
1145
1146 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1147 if (pfr_fix_anchor(filter->pfrt_anchor))
1148 return (EINVAL);
1149 if (pfr_table_count(filter, flags) < 0)
1150 return (ENOENT);
1151
1152 SLIST_INIT(&workq);
1153 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1154 if (pfr_skip_table(filter, p, flags))
1155 continue;
1156 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1157 continue;
1158 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1159 continue;
1160 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1161 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1162 xdel++;
1163 }
1164 if (!(flags & PFR_FLAG_DUMMY))
1165 pfr_setflags_ktables(&workq);
1166 if (ndel != NULL)
1167 *ndel = xdel;
1168 return (0);
1169 }
1170
1171 int
1172 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1173 {
1174 struct pfr_ktableworkq addq, changeq;
1175 struct pfr_ktable *p, *q, *r, key;
1176 int i, rv, xadd = 0;
1177 long tzero = time_second;
1178
1179 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1180 SLIST_INIT(&addq);
1181 SLIST_INIT(&changeq);
1182 for (i = 0; i < size; i++) {
1183 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1184 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1185 flags & PFR_FLAG_USERIOCTL))
1186 senderr(EINVAL);
1187 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1188 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1189 if (p == NULL) {
1190 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1191 if (p == NULL)
1192 senderr(ENOMEM);
1193 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1194 if (!pfr_ktable_compare(p, q)) {
1195 pfr_destroy_ktable(p, 0);
1196 goto _skip;
1197 }
1198 }
1199 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1200 xadd++;
1201 if (!key.pfrkt_anchor[0])
1202 goto _skip;
1203
1204 /* find or create root table */
1205 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1206 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1207 if (r != NULL) {
1208 p->pfrkt_root = r;
1209 goto _skip;
1210 }
1211 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1212 if (!pfr_ktable_compare(&key, q)) {
1213 p->pfrkt_root = q;
1214 goto _skip;
1215 }
1216 }
1217 key.pfrkt_flags = 0;
1218 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1219 if (r == NULL)
1220 senderr(ENOMEM);
1221 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1222 p->pfrkt_root = r;
1223 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1224 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1225 if (!pfr_ktable_compare(&key, q))
1226 goto _skip;
1227 p->pfrkt_nflags = (p->pfrkt_flags &
1228 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1229 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1230 xadd++;
1231 }
1232 _skip:
1233 ;
1234 }
1235 if (!(flags & PFR_FLAG_DUMMY)) {
1236 pfr_insert_ktables(&addq);
1237 pfr_setflags_ktables(&changeq);
1238 } else
1239 pfr_destroy_ktables(&addq, 0);
1240 if (nadd != NULL)
1241 *nadd = xadd;
1242 return (0);
1243 _bad:
1244 pfr_destroy_ktables(&addq, 0);
1245 return (rv);
1246 }
1247
1248 int
1249 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1250 {
1251 struct pfr_ktableworkq workq;
1252 struct pfr_ktable *p, *q, key;
1253 int i, xdel = 0;
1254
1255 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1256 SLIST_INIT(&workq);
1257 for (i = 0; i < size; i++) {
1258 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1259 if (pfr_validate_table(&key.pfrkt_t, 0,
1260 flags & PFR_FLAG_USERIOCTL))
1261 return (EINVAL);
1262 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1263 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1264 SLIST_FOREACH(q, &workq, pfrkt_workq)
1265 if (!pfr_ktable_compare(p, q))
1266 goto _skip;
1267 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1268 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1269 xdel++;
1270 }
1271 _skip:
1272 ;
1273 }
1274
1275 if (!(flags & PFR_FLAG_DUMMY))
1276 pfr_setflags_ktables(&workq);
1277 if (ndel != NULL)
1278 *ndel = xdel;
1279 return (0);
1280 }
1281
1282 int
1283 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1284 int flags)
1285 {
1286 struct pfr_ktable *p;
1287 int n, nn;
1288
1289 PF_RULES_RASSERT();
1290
1291 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1292 if (pfr_fix_anchor(filter->pfrt_anchor))
1293 return (EINVAL);
1294 n = nn = pfr_table_count(filter, flags);
1295 if (n < 0)
1296 return (ENOENT);
1297 if (n > *size) {
1298 *size = n;
1299 return (0);
1300 }
1301 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1302 if (pfr_skip_table(filter, p, flags))
1303 continue;
1304 if (n-- <= 0)
1305 continue;
1306 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1307 }
1308
1309 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1310
1311 *size = nn;
1312 return (0);
1313 }
1314
1315 int
1316 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1317 int flags)
1318 {
1319 struct pfr_ktable *p;
1320 struct pfr_ktableworkq workq;
1321 int n, nn;
1322 long tzero = time_second;
1323 int pfr_dir, pfr_op;
1324
1325 /* XXX PFR_FLAG_CLSTATS disabled */
1326 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1327 if (pfr_fix_anchor(filter->pfrt_anchor))
1328 return (EINVAL);
1329 n = nn = pfr_table_count(filter, flags);
1330 if (n < 0)
1331 return (ENOENT);
1332 if (n > *size) {
1333 *size = n;
1334 return (0);
1335 }
1336 SLIST_INIT(&workq);
1337 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1338 if (pfr_skip_table(filter, p, flags))
1339 continue;
1340 if (n-- <= 0)
1341 continue;
1342 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1343 sizeof(struct pfr_table));
1344 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1345 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1346 tbl->pfrts_packets[pfr_dir][pfr_op] =
1347 pfr_kstate_counter_fetch(
1348 &p->pfrkt_packets[pfr_dir][pfr_op]);
1349 tbl->pfrts_bytes[pfr_dir][pfr_op] =
1350 pfr_kstate_counter_fetch(
1351 &p->pfrkt_bytes[pfr_dir][pfr_op]);
1352 }
1353 }
1354 tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
1355 tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
1356 tbl->pfrts_tzero = p->pfrkt_tzero;
1357 tbl->pfrts_cnt = p->pfrkt_cnt;
1358 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1359 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1360 tbl++;
1361 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1362 }
1363 if (flags & PFR_FLAG_CLSTATS)
1364 pfr_clstats_ktables(&workq, tzero,
1365 flags & PFR_FLAG_ADDRSTOO);
1366
1367 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1368
1369 *size = nn;
1370 return (0);
1371 }
1372
1373 int
1374 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1375 {
1376 struct pfr_ktableworkq workq;
1377 struct pfr_ktable *p, key;
1378 int i, xzero = 0;
1379 long tzero = time_second;
1380
1381 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1382 SLIST_INIT(&workq);
1383 for (i = 0; i < size; i++) {
1384 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1385 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1386 return (EINVAL);
1387 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1388 if (p != NULL) {
1389 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1390 xzero++;
1391 }
1392 }
1393 if (!(flags & PFR_FLAG_DUMMY))
1394 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1395 if (nzero != NULL)
1396 *nzero = xzero;
1397 return (0);
1398 }
1399
1400 int
1401 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1402 int *nchange, int *ndel, int flags)
1403 {
1404 struct pfr_ktableworkq workq;
1405 struct pfr_ktable *p, *q, key;
1406 int i, xchange = 0, xdel = 0;
1407
1408 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1409 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1410 (clrflag & ~PFR_TFLAG_USRMASK) ||
1411 (setflag & clrflag))
1412 return (EINVAL);
1413 SLIST_INIT(&workq);
1414 for (i = 0; i < size; i++) {
1415 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1416 if (pfr_validate_table(&key.pfrkt_t, 0,
1417 flags & PFR_FLAG_USERIOCTL))
1418 return (EINVAL);
1419 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1420 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1421 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1422 ~clrflag;
1423 if (p->pfrkt_nflags == p->pfrkt_flags)
1424 goto _skip;
1425 SLIST_FOREACH(q, &workq, pfrkt_workq)
1426 if (!pfr_ktable_compare(p, q))
1427 goto _skip;
1428 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1429 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1430 (clrflag & PFR_TFLAG_PERSIST) &&
1431 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1432 xdel++;
1433 else
1434 xchange++;
1435 }
1436 _skip:
1437 ;
1438 }
1439 if (!(flags & PFR_FLAG_DUMMY))
1440 pfr_setflags_ktables(&workq);
1441 if (nchange != NULL)
1442 *nchange = xchange;
1443 if (ndel != NULL)
1444 *ndel = xdel;
1445 return (0);
1446 }
1447
1448 int
1449 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1450 {
1451 struct pfr_ktableworkq workq;
1452 struct pfr_ktable *p;
1453 struct pf_kruleset *rs;
1454 int xdel = 0;
1455
1456 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1457 rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1458 if (rs == NULL)
1459 return (ENOMEM);
1460 SLIST_INIT(&workq);
1461 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1462 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1463 pfr_skip_table(trs, p, 0))
1464 continue;
1465 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1466 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1467 xdel++;
1468 }
1469 if (!(flags & PFR_FLAG_DUMMY)) {
1470 pfr_setflags_ktables(&workq);
1471 if (ticket != NULL)
1472 *ticket = ++rs->tticket;
1473 rs->topen = 1;
1474 } else
1475 pf_remove_if_empty_kruleset(rs);
1476 if (ndel != NULL)
1477 *ndel = xdel;
1478 return (0);
1479 }
1480
1481 int
1482 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1483 int *nadd, int *naddr, u_int32_t ticket, int flags)
1484 {
1485 struct pfr_ktableworkq tableq;
1486 struct pfr_kentryworkq addrq;
1487 struct pfr_ktable *kt, *rt, *shadow, key;
1488 struct pfr_kentry *p;
1489 struct pfr_addr *ad;
1490 struct pf_kruleset *rs;
1491 int i, rv, xadd = 0, xaddr = 0;
1492
1493 PF_RULES_WASSERT();
1494
1495 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1496 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1497 return (EINVAL);
1498 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1499 flags & PFR_FLAG_USERIOCTL))
1500 return (EINVAL);
1501 rs = pf_find_kruleset(tbl->pfrt_anchor);
1502 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1503 return (EBUSY);
1504 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1505 SLIST_INIT(&tableq);
1506 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1507 if (kt == NULL) {
1508 kt = pfr_create_ktable(tbl, 0, 1);
1509 if (kt == NULL)
1510 return (ENOMEM);
1511 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1512 xadd++;
1513 if (!tbl->pfrt_anchor[0])
1514 goto _skip;
1515
1516 /* find or create root table */
1517 bzero(&key, sizeof(key));
1518 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1519 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1520 if (rt != NULL) {
1521 kt->pfrkt_root = rt;
1522 goto _skip;
1523 }
1524 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1525 if (rt == NULL) {
1526 pfr_destroy_ktables(&tableq, 0);
1527 return (ENOMEM);
1528 }
1529 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1530 kt->pfrkt_root = rt;
1531 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1532 xadd++;
1533 _skip:
1534 shadow = pfr_create_ktable(tbl, 0, 0);
1535 if (shadow == NULL) {
1536 pfr_destroy_ktables(&tableq, 0);
1537 return (ENOMEM);
1538 }
1539 SLIST_INIT(&addrq);
1540 for (i = 0, ad = addr; i < size; i++, ad++) {
1541 if (pfr_validate_addr(ad))
1542 senderr(EINVAL);
1543 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1544 continue;
1545 p = pfr_create_kentry(ad,
1546 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1547 if (p == NULL)
1548 senderr(ENOMEM);
1549 if (pfr_route_kentry(shadow, p)) {
1550 pfr_destroy_kentry(p);
1551 continue;
1552 }
1553 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1554 xaddr++;
1555 }
1556 if (!(flags & PFR_FLAG_DUMMY)) {
1557 if (kt->pfrkt_shadow != NULL)
1558 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1559 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1560 pfr_insert_ktables(&tableq);
1561 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1562 xaddr : NO_ADDRESSES;
1563 kt->pfrkt_shadow = shadow;
1564 } else {
1565 pfr_clean_node_mask(shadow, &addrq);
1566 pfr_destroy_ktable(shadow, 0);
1567 pfr_destroy_ktables(&tableq, 0);
1568 pfr_destroy_kentries(&addrq);
1569 }
1570 if (nadd != NULL)
1571 *nadd = xadd;
1572 if (naddr != NULL)
1573 *naddr = xaddr;
1574 return (0);
1575 _bad:
1576 pfr_destroy_ktable(shadow, 0);
1577 pfr_destroy_ktables(&tableq, 0);
1578 pfr_destroy_kentries(&addrq);
1579 return (rv);
1580 }
1581
1582 int
1583 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1584 {
1585 struct pfr_ktableworkq workq;
1586 struct pfr_ktable *p;
1587 struct pf_kruleset *rs;
1588 int xdel = 0;
1589
1590 PF_RULES_WASSERT();
1591
1592 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1593 rs = pf_find_kruleset(trs->pfrt_anchor);
1594 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1595 return (0);
1596 SLIST_INIT(&workq);
1597 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1598 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1599 pfr_skip_table(trs, p, 0))
1600 continue;
1601 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1602 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1603 xdel++;
1604 }
1605 if (!(flags & PFR_FLAG_DUMMY)) {
1606 pfr_setflags_ktables(&workq);
1607 rs->topen = 0;
1608 pf_remove_if_empty_kruleset(rs);
1609 }
1610 if (ndel != NULL)
1611 *ndel = xdel;
1612 return (0);
1613 }
1614
1615 int
1616 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1617 int *nchange, int flags)
1618 {
1619 struct pfr_ktable *p, *q;
1620 struct pfr_ktableworkq workq;
1621 struct pf_kruleset *rs;
1622 int xadd = 0, xchange = 0;
1623 long tzero = time_second;
1624
1625 PF_RULES_WASSERT();
1626
1627 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1628 rs = pf_find_kruleset(trs->pfrt_anchor);
1629 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1630 return (EBUSY);
1631
1632 SLIST_INIT(&workq);
1633 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1634 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1635 pfr_skip_table(trs, p, 0))
1636 continue;
1637 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1638 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1639 xchange++;
1640 else
1641 xadd++;
1642 }
1643
1644 if (!(flags & PFR_FLAG_DUMMY)) {
1645 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1646 q = SLIST_NEXT(p, pfrkt_workq);
1647 pfr_commit_ktable(p, tzero);
1648 }
1649 rs->topen = 0;
1650 pf_remove_if_empty_kruleset(rs);
1651 }
1652 if (nadd != NULL)
1653 *nadd = xadd;
1654 if (nchange != NULL)
1655 *nchange = xchange;
1656
1657 return (0);
1658 }
1659
1660 static void
1661 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1662 {
1663 counter_u64_t *pkc, *qkc;
1664 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1665 int nflags;
1666
1667 PF_RULES_WASSERT();
1668
1669 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1670 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1671 pfr_clstats_ktable(kt, tzero, 1);
1672 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1673 /* kt might contain addresses */
1674 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1675 struct pfr_kentry *p, *q, *next;
1676 struct pfr_addr ad;
1677
1678 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1679 pfr_mark_addrs(kt);
1680 SLIST_INIT(&addq);
1681 SLIST_INIT(&changeq);
1682 SLIST_INIT(&delq);
1683 SLIST_INIT(&garbageq);
1684 pfr_clean_node_mask(shadow, &addrq);
1685 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
1686 pfr_copyout_addr(&ad, p);
1687 q = pfr_lookup_addr(kt, &ad, 1);
1688 if (q != NULL) {
1689 if (q->pfrke_not != p->pfrke_not)
1690 SLIST_INSERT_HEAD(&changeq, q,
1691 pfrke_workq);
1692 pkc = &p->pfrke_counters.pfrkc_counters;
1693 qkc = &q->pfrke_counters.pfrkc_counters;
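/* Give the surviving entry q the counter allocation of the new (shadow) entry p; the leftover allocation, if any, is freed with the garbage queue. */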
1694 if ((*pkc == NULL) != (*qkc == NULL))
1695 SWAP(counter_u64_t, *pkc, *qkc);
1696 q->pfrke_mark = 1;
1697 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1698 } else {
1699 p->pfrke_counters.pfrkc_tzero = tzero;
1700 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1701 }
1702 }
1703 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1704 pfr_insert_kentries(kt, &addq, tzero);
1705 pfr_remove_kentries(kt, &delq);
1706 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1707 pfr_destroy_kentries(&garbageq);
1708 } else {
1709 /* kt cannot contain addresses */
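/* Swap in the shadow table's radix trees and entry count wholesale instead of moving entries one by one. */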
1710 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1711 shadow->pfrkt_ip4);
1712 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1713 shadow->pfrkt_ip6);
1714 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1715 pfr_clstats_ktable(kt, tzero, 1);
1716 }
1717 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1718 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1719 & ~PFR_TFLAG_INACTIVE;
1720 pfr_destroy_ktable(shadow, 0);
1721 kt->pfrkt_shadow = NULL;
1722 pfr_setflags_ktable(kt, nflags);
1723 }
1724
1725 static int
1726 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1727 {
1728 int i;
1729
1730 if (!tbl->pfrt_name[0])
1731 return (-1);
1732 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1733 return (-1);
1734 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1735 return (-1);
1736 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1737 if (tbl->pfrt_name[i])
1738 return (-1);
1739 if (pfr_fix_anchor(tbl->pfrt_anchor))
1740 return (-1);
1741 if (tbl->pfrt_flags & ~allowedflags)
1742 return (-1);
1743 return (0);
1744 }
1745
1746 /*
1747 * Rewrite anchors referenced by tables to remove slashes
1748 * and check for validity.
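* Only leading slashes are stripped; e.g. "/foo/bar" becomes "foo/bar".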
1749 */
1750 static int
1751 pfr_fix_anchor(char *anchor)
1752 {
1753 size_t siz = MAXPATHLEN;
1754 int i;
1755
1756 if (anchor[0] == '/') {
1757 char *path;
1758 int off;
1759
1760 path = anchor;
1761 off = 1;
1762 while (*++path == '/')
1763 off++;
1764 bcopy(path, anchor, siz - off);
1765 memset(anchor + siz - off, 0, off);
1766 }
1767 if (anchor[siz - 1])
1768 return (-1);
1769 for (i = strlen(anchor); i < siz; i++)
1770 if (anchor[i])
1771 return (-1);
1772 return (0);
1773 }
1774
1775 int
1776 pfr_table_count(struct pfr_table *filter, int flags)
1777 {
1778 struct pf_kruleset *rs;
1779
1780 PF_RULES_ASSERT();
1781
1782 if (flags & PFR_FLAG_ALLRSETS)
1783 return (V_pfr_ktable_cnt);
1784 if (filter->pfrt_anchor[0]) {
1785 rs = pf_find_kruleset(filter->pfrt_anchor);
1786 return ((rs != NULL) ? rs->tables : -1);
1787 }
1788 return (pf_main_ruleset.tables);
1789 }
1790
1791 static int
1792 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1793 {
1794 if (flags & PFR_FLAG_ALLRSETS)
1795 return (0);
1796 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1797 return (1);
1798 return (0);
1799 }
1800
1801 static void
1802 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1803 {
1804 struct pfr_ktable *p;
1805
1806 SLIST_FOREACH(p, workq, pfrkt_workq)
1807 pfr_insert_ktable(p);
1808 }
1809
1810 static void
1811 pfr_insert_ktable(struct pfr_ktable *kt)
1812 {
1813
1814 PF_RULES_WASSERT();
1815
1816 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1817 V_pfr_ktable_cnt++;
1818 if (kt->pfrkt_root != NULL)
1819 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1820 pfr_setflags_ktable(kt->pfrkt_root,
1821 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1822 }
1823
1824 static void
1825 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1826 {
1827 struct pfr_ktable *p, *q;
1828
1829 for (p = SLIST_FIRST(workq); p; p = q) {
1830 q = SLIST_NEXT(p, pfrkt_workq);
1831 pfr_setflags_ktable(p, p->pfrkt_nflags);
1832 }
1833 }
1834
1835 static void
1836 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1837 {
1838 struct pfr_kentryworkq addrq;
1839 struct pfr_walktree w;
1840
1841 PF_RULES_WASSERT();
1842
1843 if (!(newf & PFR_TFLAG_REFERENCED) &&
1844 !(newf & PFR_TFLAG_REFDANCHOR) &&
1845 !(newf & PFR_TFLAG_PERSIST))
1846 newf &= ~PFR_TFLAG_ACTIVE;
1847 if (!(newf & PFR_TFLAG_ACTIVE))
1848 newf &= ~PFR_TFLAG_USRMASK;
1849 if (!(newf & PFR_TFLAG_SETMASK)) {
1850 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1851 if (kt->pfrkt_root != NULL)
1852 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1853 pfr_setflags_ktable(kt->pfrkt_root,
1854 kt->pfrkt_root->pfrkt_flags &
1855 ~PFR_TFLAG_REFDANCHOR);
1856 pfr_destroy_ktable(kt, 1);
1857 V_pfr_ktable_cnt--;
1858 return;
1859 }
1860 if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1861 bzero(&w, sizeof(w));
1862 w.pfrw_op = PFRW_COUNTERS;
1863 w.pfrw_flags |= PFR_TFLAG_COUNTERS;
1864 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1865 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1866 }
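/* Clearing PFR_TFLAG_COUNTERS: walk with pfrw_flags left at zero so the PFRW_COUNTERS case frees each entry's counters. */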
1867 if (! (newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1868 bzero(&w, sizeof(w));
1869 w.pfrw_op = PFRW_COUNTERS;
1870 w.pfrw_flags |= 0;
1871 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1872 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1873 }
1874 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1875 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1876 pfr_remove_kentries(kt, &addrq);
1877 }
1878 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1879 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1880 kt->pfrkt_shadow = NULL;
1881 }
1882 kt->pfrkt_flags = newf;
1883 }
1884
1885 static void
1886 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1887 {
1888 struct pfr_ktable *p;
1889
1890 SLIST_FOREACH(p, workq, pfrkt_workq)
1891 pfr_clstats_ktable(p, tzero, recurse);
1892 }
1893
1894 static void
1895 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1896 {
1897 struct pfr_kentryworkq addrq;
1898 int pfr_dir, pfr_op;
1899
1900 MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1901
1902 if (recurse) {
1903 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1904 pfr_clstats_kentries(kt, &addrq, tzero, 0);
1905 }
1906 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1907 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1908 pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1909 pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1910 }
1911 }
1912 pfr_kstate_counter_zero(&kt->pfrkt_match);
1913 pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1914 kt->pfrkt_tzero = tzero;
1915 }
1916
1917 static struct pfr_ktable *
1918 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1919 {
1920 struct pfr_ktable *kt;
1921 struct pf_kruleset *rs;
1922 int pfr_dir, pfr_op;
1923
1924 PF_RULES_WASSERT();
1925
1926 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1927 if (kt == NULL)
1928 return (NULL);
1929 kt->pfrkt_t = *tbl;
1930
1931 if (attachruleset) {
1932 rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1933 if (!rs) {
1934 pfr_destroy_ktable(kt, 0);
1935 return (NULL);
1936 }
1937 kt->pfrkt_rs = rs;
1938 rs->tables++;
1939 }
1940
1941 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1942 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1943 if (pfr_kstate_counter_init(
1944 &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1945 pfr_destroy_ktable(kt, 0);
1946 return (NULL);
1947 }
1948 if (pfr_kstate_counter_init(
1949 &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1950 pfr_destroy_ktable(kt, 0);
1951 return (NULL);
1952 }
1953 }
1954 }
1955 if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
1956 pfr_destroy_ktable(kt, 0);
1957 return (NULL);
1958 }
1959
1960 if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
1961 pfr_destroy_ktable(kt, 0);
1962 return (NULL);
1963 }
1964
1965 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1966 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1967 !rn_inithead((void **)&kt->pfrkt_ip6,
1968 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1969 pfr_destroy_ktable(kt, 0);
1970 return (NULL);
1971 }
1972 kt->pfrkt_tzero = tzero;
1973
1974 return (kt);
1975 }
1976
1977 static void
1978 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1979 {
1980 struct pfr_ktable *p, *q;
1981
1982 for (p = SLIST_FIRST(workq); p; p = q) {
1983 q = SLIST_NEXT(p, pfrkt_workq);
1984 pfr_destroy_ktable(p, flushaddr);
1985 }
1986 }
1987
1988 static void
1989 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1990 {
1991 struct pfr_kentryworkq addrq;
1992 int pfr_dir, pfr_op;
1993
1994 if (flushaddr) {
1995 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1996 pfr_clean_node_mask(kt, &addrq);
1997 pfr_destroy_kentries(&addrq);
1998 }
1999 if (kt->pfrkt_ip4 != NULL)
2000 rn_detachhead((void **)&kt->pfrkt_ip4);
2001 if (kt->pfrkt_ip6 != NULL)
2002 rn_detachhead((void **)&kt->pfrkt_ip6);
2003 if (kt->pfrkt_shadow != NULL)
2004 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2005 if (kt->pfrkt_rs != NULL) {
2006 kt->pfrkt_rs->tables--;
2007 pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2008 }
2009 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2010 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2011 pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2012 pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2013 }
2014 }
2015 pfr_kstate_counter_deinit(&kt->pfrkt_match);
2016 pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2017
2018 free(kt, M_PFTABLE);
2019 }
2020
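/* RB-tree ordering for tables: compare by name first, then by anchor path. */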
2021 static int
2022 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2023 {
2024 int d;
2025
2026 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2027 return (d);
2028 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2029 }
2030
2031 static struct pfr_ktable *
2032 pfr_lookup_table(struct pfr_table *tbl)
2033 {
2034 /* a struct pfr_ktable starts like a struct pfr_table */
2035 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2036 (struct pfr_ktable *)tbl));
2037 }
2038
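/*
 * Look up address 'a' in table 'kt' (falling back to the root table when
 * 'kt' itself is inactive).  Returns 1 on a non-negated match, 0 otherwise,
 * and updates the table's match/nomatch counters.
 */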
2039 int
2040 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2041 {
2042 struct pfr_kentry *ke = NULL;
2043 int match;
2044
2045 PF_RULES_RASSERT();
2046
2047 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2048 kt = kt->pfrkt_root;
2049 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2050 return (0);
2051
2052 switch (af) {
2053 #ifdef INET
2054 case AF_INET:
2055 {
2056 struct sockaddr_in sin;
2057
2058 bzero(&sin, sizeof(sin));
2059 sin.sin_len = sizeof(sin);
2060 sin.sin_family = AF_INET;
2061 sin.sin_addr.s_addr = a->addr32[0];
2062 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2063 if (ke && KENTRY_RNF_ROOT(ke))
2064 ke = NULL;
2065 break;
2066 }
2067 #endif /* INET */
2068 #ifdef INET6
2069 case AF_INET6:
2070 {
2071 struct sockaddr_in6 sin6;
2072
2073 bzero(&sin6, sizeof(sin6));
2074 sin6.sin6_len = sizeof(sin6);
2075 sin6.sin6_family = AF_INET6;
2076 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2077 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2078 if (ke && KENTRY_RNF_ROOT(ke))
2079 ke = NULL;
2080 break;
2081 }
2082 #endif /* INET6 */
2083 }
2084 match = (ke && !ke->pfrke_not);
2085 if (match)
2086 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2087 else
2088 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2089 return (match);
2090 }
2091
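/*
 * Account one packet of 'len' bytes against the table's packet/byte
 * counters and, when PFR_TFLAG_COUNTERS is set, against the matching
 * entry's counters.  A mismatch between the lookup result and 'notrule'
 * is booked under PFR_OP_XPASS instead of the caller's op_pass.
 */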
2092 void
2093 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2094 u_int64_t len, int dir_out, int op_pass, int notrule)
2095 {
2096 struct pfr_kentry *ke = NULL;
2097
2098 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2099 kt = kt->pfrkt_root;
2100 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2101 return;
2102
2103 switch (af) {
2104 #ifdef INET
2105 case AF_INET:
2106 {
2107 struct sockaddr_in sin;
2108
2109 bzero(&sin, sizeof(sin));
2110 sin.sin_len = sizeof(sin);
2111 sin.sin_family = AF_INET;
2112 sin.sin_addr.s_addr = a->addr32[0];
2113 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2114 if (ke && KENTRY_RNF_ROOT(ke))
2115 ke = NULL;
2116 break;
2117 }
2118 #endif /* INET */
2119 #ifdef INET6
2120 case AF_INET6:
2121 {
2122 struct sockaddr_in6 sin6;
2123
2124 bzero(&sin6, sizeof(sin6));
2125 sin6.sin6_len = sizeof(sin6);
2126 sin6.sin6_family = AF_INET6;
2127 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2128 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2129 if (ke && KENTRY_RNF_ROOT(ke))
2130 ke = NULL;
2131 break;
2132 }
2133 #endif /* INET6 */
2134 default:
2135 panic("%s: unknown address family %u", __func__, af);
2136 }
2137 if ((ke == NULL || ke->pfrke_not) != notrule) {
2138 if (op_pass != PFR_OP_PASS)
2139 DPFPRINTF(PF_DEBUG_URGENT,
2140 ("pfr_update_stats: assertion failed.\n"));
2141 op_pass = PFR_OP_XPASS;
2142 }
2143 pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2144 pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2145 if (ke != NULL && op_pass != PFR_OP_XPASS &&
2146 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2147 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2148 dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2149 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2150 dir_out, op_pass, PFR_TYPE_BYTES), len);
2151 }
2152 }
2153
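/*
 * Attach an Ethernet ruleset's reference to the named table, creating the
 * table (and, inside an anchor, its global root table) if it does not
 * exist yet.  The first rule reference marks the table PFR_TFLAG_REFERENCED.
 */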
2154 struct pfr_ktable *
2155 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2156 {
2157 struct pfr_ktable *kt, *rt;
2158 struct pfr_table tbl;
2159 struct pf_keth_anchor *ac = rs->anchor;
2160
2161 PF_RULES_WASSERT();
2162
2163 bzero(&tbl, sizeof(tbl));
2164 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2165 if (ac != NULL)
2166 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2167 kt = pfr_lookup_table(&tbl);
2168 if (kt == NULL) {
2169 kt = pfr_create_ktable(&tbl, time_second, 1);
2170 if (kt == NULL)
2171 return (NULL);
2172 if (ac != NULL) {
2173 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2174 rt = pfr_lookup_table(&tbl);
2175 if (rt == NULL) {
2176 rt = pfr_create_ktable(&tbl, 0, 1);
2177 if (rt == NULL) {
2178 pfr_destroy_ktable(kt, 0);
2179 return (NULL);
2180 }
2181 pfr_insert_ktable(rt);
2182 }
2183 kt->pfrkt_root = rt;
2184 }
2185 pfr_insert_ktable(kt);
2186 }
2187 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2188 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2189 return (kt);
2190 }
2191
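/* Same as pfr_eth_attach_table(), but for an ordinary pf ruleset. */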
2192 struct pfr_ktable *
2193 pfr_attach_table(struct pf_kruleset *rs, char *name)
2194 {
2195 struct pfr_ktable *kt, *rt;
2196 struct pfr_table tbl;
2197 struct pf_kanchor *ac = rs->anchor;
2198
2199 PF_RULES_WASSERT();
2200
2201 bzero(&tbl, sizeof(tbl));
2202 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2203 if (ac != NULL)
2204 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2205 kt = pfr_lookup_table(&tbl);
2206 if (kt == NULL) {
2207 kt = pfr_create_ktable(&tbl, time_second, 1);
2208 if (kt == NULL)
2209 return (NULL);
2210 if (ac != NULL) {
2211 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2212 rt = pfr_lookup_table(&tbl);
2213 if (rt == NULL) {
2214 rt = pfr_create_ktable(&tbl, 0, 1);
2215 if (rt == NULL) {
2216 pfr_destroy_ktable(kt, 0);
2217 return (NULL);
2218 }
2219 pfr_insert_ktable(rt);
2220 }
2221 kt->pfrkt_root = rt;
2222 }
2223 pfr_insert_ktable(kt);
2224 }
2225 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2226 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2227 return (kt);
2228 }
2229
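/*
 * Drop a rule reference on the table; when the last one goes away the
 * PFR_TFLAG_REFERENCED flag is cleared via pfr_setflags_ktable().
 */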
2230 void
2231 pfr_detach_table(struct pfr_ktable *kt)
2232 {
2233
2234 PF_RULES_WASSERT();
2235 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2236 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2237
2238 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2239 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2240 }
2241
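/*
 * Select an address from table 'kt' for a round-robin address pool.
 * '*pidx' is the block index to start from and 'counter' the last address
 * handed out; the chosen address is written back to 'counter' and the
 * block index to '*pidx'.  Nested (more specific) blocks are skipped by
 * advancing past them.  Returns 0 on success, 1 when the table is
 * exhausted and -1 when the table is not active.
 */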
2242 int
2243 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2244 sa_family_t af)
2245 {
2246 struct pf_addr *addr, *cur, *mask;
2247 union sockaddr_union uaddr, umask;
2248 struct pfr_kentry *ke, *ke2 = NULL;
2249 int idx = -1, use_counter = 0;
2250
2251 MPASS(pidx != NULL);
2252 MPASS(counter != NULL);
2253
2254 switch (af) {
2255 case AF_INET:
2256 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2257 uaddr.sin.sin_family = AF_INET;
2258 break;
2259 case AF_INET6:
2260 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2261 uaddr.sin6.sin6_family = AF_INET6;
2262 break;
2263 }
2264 addr = SUNION2PF(&uaddr, af);
2265
2266 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2267 kt = kt->pfrkt_root;
2268 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2269 return (-1);
2270
2271 idx = *pidx;
2272 if (idx >= 0)
2273 use_counter = 1;
2274 if (idx < 0)
2275 idx = 0;
2276
2277 _next_block:
2278 ke = pfr_kentry_byidx(kt, idx, af);
2279 if (ke == NULL) {
2280 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2281 return (1);
2282 }
2283 pfr_prepare_network(&umask, af, ke->pfrke_net);
2284 cur = SUNION2PF(&ke->pfrke_sa, af);
2285 mask = SUNION2PF(&umask, af);
2286
2287 if (use_counter) {
2288 /* is supplied address within block? */
2289 if (!PF_MATCHA(0, cur, mask, counter, af)) {
2290 /* no, go to next block in table */
2291 idx++;
2292 use_counter = 0;
2293 goto _next_block;
2294 }
2295 PF_ACPY(addr, counter, af);
2296 } else {
2297 /* use first address of block */
2298 PF_ACPY(addr, cur, af);
2299 }
2300
2301 if (!KENTRY_NETWORK(ke)) {
2302 /* this is a single IP address - no possible nested block */
2303 PF_ACPY(counter, addr, af);
2304 *pidx = idx;
2305 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2306 return (0);
2307 }
2308 for (;;) {
2309 /* we don't want to use a nested block */
2310 switch (af) {
2311 case AF_INET:
2312 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2313 &kt->pfrkt_ip4->rh);
2314 break;
2315 case AF_INET6:
2316 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2317 &kt->pfrkt_ip6->rh);
2318 break;
2319 }
2320 /* no need to check KENTRY_RNF_ROOT() here */
2321 if (ke2 == ke) {
2322 /* lookup returned the same block - perfect */
2323 PF_ACPY(counter, addr, af);
2324 *pidx = idx;
2325 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2326 return (0);
2327 }
2328
2329 /* we need to increase the counter past the nested block */
2330 pfr_prepare_network(&umask, af, ke2->pfrke_net);
2331 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2332 PF_AINC(addr, af);
2333 if (!PF_MATCHA(0, cur, mask, addr, af)) {
2334 /* ok, we reached the end of our main block */
2335 /* go to next block in table */
2336 idx++;
2337 use_counter = 0;
2338 goto _next_block;
2339 }
2340 }
2341 }
2342
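/* Return the idx-th entry of the given address family via a PFRW_POOL_GET walk. */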
2343 static struct pfr_kentry *
2344 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2345 {
2346 struct pfr_walktree w;
2347
2348 bzero(&w, sizeof(w));
2349 w.pfrw_op = PFRW_POOL_GET;
2350 w.pfrw_cnt = idx;
2351
2352 switch (af) {
2353 #ifdef INET
2354 case AF_INET:
2355 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2356 return (w.pfrw_kentry);
2357 #endif /* INET */
2358 #ifdef INET6
2359 case AF_INET6:
2360 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2361 return (w.pfrw_kentry);
2362 #endif /* INET6 */
2363 default:
2364 return (NULL);
2365 }
2366 }
2367
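/*
 * Refresh the pfi_dynaddr state from this table: reset the IPv4/IPv6
 * address counts and walk the matching radix tree(s) with
 * PFRW_DYNADDR_UPDATE.
 */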
2368 void
2369 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2370 {
2371 struct pfr_walktree w;
2372
2373 bzero(&w, sizeof(w));
2374 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2375 w.pfrw_dyn = dyn;
2376
2377 dyn->pfid_acnt4 = 0;
2378 dyn->pfid_acnt6 = 0;
2379 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2380 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2381 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2382 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2383 }