1 /* $FreeBSD$ */
2
3 /*
4 * Copyright (C) 2012 by Darren Reed.
5 *
6 * See the IPFILTER.LICENCE file for details on licencing.
7 */
8 #if defined(KERNEL) || defined(_KERNEL)
9 # undef KERNEL
10 # undef _KERNEL
11 # define KERNEL 1
12 # define _KERNEL 1
13 #endif
14 #include <sys/errno.h>
15 #include <sys/types.h>
16 #include <sys/param.h>
17 #include <sys/time.h>
18 #include <sys/file.h>
19 #if !defined(_KERNEL)
20 # include <stdio.h>
21 # include <string.h>
22 # include <stdlib.h>
23 # define _KERNEL
24 # include <sys/uio.h>
25 # undef _KERNEL
26 #endif
27 #if defined(_KERNEL) && defined(__FreeBSD__)
28 # include <sys/filio.h>
29 # include <sys/fcntl.h>
30 #else
31 # include <sys/ioctl.h>
32 #endif
33 # include <sys/protosw.h>
34 #include <sys/socket.h>
35 #if defined(_KERNEL)
36 # include <sys/systm.h>
37 # if !defined(__SVR4)
38 # include <sys/mbuf.h>
39 # endif
40 #endif
41 #if !defined(__SVR4)
42 # if defined(_KERNEL)
43 # include <sys/kernel.h>
44 # endif
45 #else
46 # include <sys/byteorder.h>
47 # ifdef _KERNEL
48 # include <sys/dditypes.h>
49 # endif
50 # include <sys/stream.h>
51 # include <sys/kmem.h>
52 #endif
53 #include <net/if.h>
54 #ifdef sun
55 # include <net/af.h>
56 #endif
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 # include <netinet/ip_var.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <netinet/ip_icmp.h>
64 #include "netinet/ip_compat.h"
65 #include <netinet/tcpip.h>
66 #include "netinet/ip_fil.h"
67 #include "netinet/ip_nat.h"
68 #include "netinet/ip_frag.h"
69 #include "netinet/ip_state.h"
70 #include "netinet/ip_auth.h"
71 #include "netinet/ip_lookup.h"
72 #include "netinet/ip_proxy.h"
73 #include "netinet/ip_sync.h"
74 /* END OF INCLUDES */
75
76 #if !defined(lint)
77 static const char sccsid[] = "@(#)ip_frag.c 1.11 3/24/96 (C) 1993-2000 Darren Reed";
78 static const char rcsid[] = "@(#)$FreeBSD$";
79 /* static const char rcsid[] = "@(#)$Id: ip_frag.c,v 2.77.2.12 2007/09/20 12:51:51 darrenr Exp $"; */
80 #endif
81
82
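/*
 * When USE_MUTEXES is defined, the internal helpers below take an extra
 * read/write lock argument so that all of the locking can be compiled out
 * entirely on builds that do not define it.
 */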
83 #ifdef USE_MUTEXES
84 static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
85 fr_info_t *, u_32_t, ipfr_t **,
86 ipfrwlock_t *);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
				fr_info_t *, ipfr_t **, ipfrwlock_t *);
88 static void ipf_frag_deref(void *, ipfr_t **, ipfrwlock_t *);
89 static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
90 ipfr_t **, ipfrwlock_t *);
91 #else
92 static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
93 fr_info_t *, u_32_t, ipfr_t **);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
				fr_info_t *, ipfr_t **);
95 static void ipf_frag_deref(void *, ipfr_t **);
96 static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
97 ipfr_t **);
98 #endif
99 static void ipf_frag_delete(ipf_main_softc_t *, ipfr_t *, ipfr_t ***);
100 static void ipf_frag_free(ipf_frag_softc_t *, ipfr_t *);
101
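/*
 * Catch-all rule handed back by ipf_frag_known() for fragments flagged
 * FI_BAD; it is initialised to FR_BLOCK|FR_QUICK in ipf_frag_main_load().
 */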
102 static frentry_t ipfr_block;
103
104 static ipftuneable_t ipf_frag_tuneables[] = {
105 { { (void *)offsetof(ipf_frag_softc_t, ipfr_size) },
106 "frag_size", 1, 0x7fffffff,
107 stsizeof(ipf_frag_softc_t, ipfr_size),
108 IPFT_WRDISABLED, NULL, NULL },
109 { { (void *)offsetof(ipf_frag_softc_t, ipfr_ttl) },
110 "frag_ttl", 1, 0x7fffffff,
111 stsizeof(ipf_frag_softc_t, ipfr_ttl),
112 0, NULL, NULL },
113 { { NULL },
114 NULL, 0, 0,
115 0,
116 0, NULL, NULL }
117 };
118
119 #define FBUMP(x) softf->ipfr_stats.x++
120 #define FBUMPD(x) do { softf->ipfr_stats.x++; DT(x); } while (0)
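/*
 * FBUMP bumps a counter in the per-context fragment statistics; FBUMPD
 * does the same and also triggers the corresponding DT() probe.  Both
 * expect a local "softf" pointer to be in scope, e.g. FBUMPD(ifs_nomem).
 */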
121
122
123 /* ------------------------------------------------------------------------ */
124 /* Function: ipf_frag_main_load */
125 /* Returns: int - 0 == success, -1 == error */
126 /* Parameters: Nil */
127 /* */
/* Initialise the filter rule associated with blocked packets; everyone can  */
129 /* use it. */
130 /* ------------------------------------------------------------------------ */
131 int
132 ipf_frag_main_load(void)
133 {
134 bzero((char *)&ipfr_block, sizeof(ipfr_block));
135 ipfr_block.fr_flags = FR_BLOCK|FR_QUICK;
136 ipfr_block.fr_ref = 1;
137
138 return (0);
139 }
140
141
142 /* ------------------------------------------------------------------------ */
143 /* Function: ipf_frag_main_unload */
144 /* Returns: int - 0 == success, -1 == error */
145 /* Parameters: Nil */
146 /* */
147 /* A null-op function that exists as a placeholder so that the flow in */
148 /* other functions is obvious. */
149 /* ------------------------------------------------------------------------ */
150 int
151 ipf_frag_main_unload(void)
152 {
153 return (0);
154 }
155
156
157 /* ------------------------------------------------------------------------ */
158 /* Function: ipf_frag_soft_create */
159 /* Returns: void * - NULL = failure, else pointer to local context */
160 /* Parameters: softc(I) - pointer to soft context main structure */
161 /* */
162 /* Allocate a new soft context structure to track fragment related info. */
163 /* ------------------------------------------------------------------------ */
164 /*ARGSUSED*/
165 void *
166 ipf_frag_soft_create(ipf_main_softc_t *softc)
167 {
168 ipf_frag_softc_t *softf;
169
170 KMALLOC(softf, ipf_frag_softc_t *);
171 if (softf == NULL)
172 return (NULL);
173
174 bzero((char *)softf, sizeof(*softf));
175
176 RWLOCK_INIT(&softf->ipfr_ipidfrag, "frag ipid lock");
177 RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
178 RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");
179
180 softf->ipf_frag_tune = ipf_tune_array_copy(softf,
181 sizeof(ipf_frag_tuneables),
182 ipf_frag_tuneables);
183 if (softf->ipf_frag_tune == NULL) {
184 ipf_frag_soft_destroy(softc, softf);
185 return (NULL);
186 }
187 if (ipf_tune_array_link(softc, softf->ipf_frag_tune) == -1) {
188 ipf_frag_soft_destroy(softc, softf);
189 return (NULL);
190 }
191
192 softf->ipfr_size = IPFT_SIZE;
193 softf->ipfr_ttl = IPF_TTLVAL(60);
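	/* Stay locked (disabled) until ipf_frag_soft_init builds the tables. */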
194 softf->ipfr_lock = 1;
195 softf->ipfr_tail = &softf->ipfr_list;
196 softf->ipfr_nattail = &softf->ipfr_natlist;
197 softf->ipfr_ipidtail = &softf->ipfr_ipidlist;
198
199 return (softf);
200 }
201
202
203 /* ------------------------------------------------------------------------ */
204 /* Function: ipf_frag_soft_destroy */
205 /* Returns: Nil */
206 /* Parameters: softc(I) - pointer to soft context main structure */
207 /* arg(I) - pointer to local context to use */
208 /* */
/* Undo everything set up in ipf_frag_soft_create and free the context.      */
210 /* ------------------------------------------------------------------------ */
211 void
212 ipf_frag_soft_destroy(ipf_main_softc_t *softc, void *arg)
213 {
214 ipf_frag_softc_t *softf = arg;
215
216 RW_DESTROY(&softf->ipfr_ipidfrag);
217 RW_DESTROY(&softf->ipfr_frag);
218 RW_DESTROY(&softf->ipfr_natfrag);
219
220 if (softf->ipf_frag_tune != NULL) {
221 ipf_tune_array_unlink(softc, softf->ipf_frag_tune);
222 KFREES(softf->ipf_frag_tune, sizeof(ipf_frag_tuneables));
223 softf->ipf_frag_tune = NULL;
224 }
225
226 KFREE(softf);
227 }
228
229
230 /* ------------------------------------------------------------------------ */
231 /* Function: ipf_frag_soft_init */
232 /* Returns: int - 0 == success, -1 == error */
233 /* Parameters: softc(I) - pointer to soft context main structure */
234 /* arg(I) - pointer to local context to use */
235 /* */
236 /* Initialise the hash tables for the fragment cache lookups. */
237 /* ------------------------------------------------------------------------ */
238 /*ARGSUSED*/
239 int
240 ipf_frag_soft_init(ipf_main_softc_t *softc, void *arg)
241 {
242 ipf_frag_softc_t *softf = arg;
243
244 KMALLOCS(softf->ipfr_heads, ipfr_t **,
245 softf->ipfr_size * sizeof(ipfr_t *));
246 if (softf->ipfr_heads == NULL)
247 return (-1);
248
249 bzero((char *)softf->ipfr_heads, softf->ipfr_size * sizeof(ipfr_t *));
250
251 KMALLOCS(softf->ipfr_nattab, ipfr_t **,
252 softf->ipfr_size * sizeof(ipfr_t *));
253 if (softf->ipfr_nattab == NULL)
254 return (-2);
255
256 bzero((char *)softf->ipfr_nattab, softf->ipfr_size * sizeof(ipfr_t *));
257
258 KMALLOCS(softf->ipfr_ipidtab, ipfr_t **,
259 softf->ipfr_size * sizeof(ipfr_t *));
260 if (softf->ipfr_ipidtab == NULL)
261 return (-3);
262
263 bzero((char *)softf->ipfr_ipidtab,
264 softf->ipfr_size * sizeof(ipfr_t *));
265
266 softf->ipfr_lock = 0;
267 softf->ipfr_inited = 1;
268
269 return (0);
270 }
271
272
273 /* ------------------------------------------------------------------------ */
274 /* Function: ipf_frag_soft_fini */
275 /* Returns: int - 0 == success, -1 == error */
276 /* Parameters: softc(I) - pointer to soft context main structure */
277 /* arg(I) - pointer to local context to use */
278 /* */
279 /* Free all memory allocated whilst running and from initialisation. */
280 /* ------------------------------------------------------------------------ */
281 int
282 ipf_frag_soft_fini(ipf_main_softc_t *softc, void *arg)
283 {
284 ipf_frag_softc_t *softf = arg;
285
286 softf->ipfr_lock = 1;
287
288 if (softf->ipfr_inited == 1) {
289 ipf_frag_clear(softc);
290
291 softf->ipfr_inited = 0;
292 }
293
294 if (softf->ipfr_heads != NULL)
295 KFREES(softf->ipfr_heads,
296 softf->ipfr_size * sizeof(ipfr_t *));
297 softf->ipfr_heads = NULL;
298
299 if (softf->ipfr_nattab != NULL)
300 KFREES(softf->ipfr_nattab,
301 softf->ipfr_size * sizeof(ipfr_t *));
302 softf->ipfr_nattab = NULL;
303
304 if (softf->ipfr_ipidtab != NULL)
305 KFREES(softf->ipfr_ipidtab,
306 softf->ipfr_size * sizeof(ipfr_t *));
307 softf->ipfr_ipidtab = NULL;
308
309 return (0);
310 }
311
312
313 /* ------------------------------------------------------------------------ */
/* Function:    ipf_frag_setlock                                              */
315 /* Returns: Nil */
316 /* Parameters: arg(I) - pointer to local context to use */
317 /* tmp(I) - new value for lock */
318 /* */
319 /* Stub function that allows for external manipulation of ipfr_lock */
320 /* ------------------------------------------------------------------------ */
321 void
322 ipf_frag_setlock(void *arg, int tmp)
323 {
324 ipf_frag_softc_t *softf = arg;
325
326 softf->ipfr_lock = tmp;
327 }
328
329
330 /* ------------------------------------------------------------------------ */
331 /* Function: ipf_frag_stats */
332 /* Returns: ipfrstat_t* - pointer to struct with current frag stats */
333 /* Parameters: arg(I) - pointer to local context to use */
334 /* */
335 /* Updates ipfr_stats with current information and returns a pointer to it */
336 /* ------------------------------------------------------------------------ */
337 ipfrstat_t *
338 ipf_frag_stats(void *arg)
339 {
340 ipf_frag_softc_t *softf = arg;
341
342 softf->ipfr_stats.ifs_table = softf->ipfr_heads;
343 softf->ipfr_stats.ifs_nattab = softf->ipfr_nattab;
344 return (&softf->ipfr_stats);
345 }
346
347
348 /* ------------------------------------------------------------------------ */
349 /* Function: ipfr_frag_new */
350 /* Returns: ipfr_t * - pointer to fragment cache state info or NULL */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softf(I) - pointer to fragment context information          */
/*              fin(I)   - pointer to packet information                    */
/*              table(I) - pointer to frag table to add to                  */
/*              lock(I)  - pointer to lock to get a write hold of           */
354 /* */
355 /* Add a new entry to the fragment cache, registering it as having come */
356 /* through this box, with the result of the filter operation. */
357 /* */
358 /* If this function succeeds, it returns with a write lock held on "lock". */
359 /* If it fails, no lock is held on return. */
360 /* ------------------------------------------------------------------------ */
361 static ipfr_t *
362 ipfr_frag_new(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
363 fr_info_t *fin, u_32_t pass, ipfr_t *table[]
364 #ifdef USE_MUTEXES
365 , ipfrwlock_t *lock
366 #endif
367 )
368 {
369 ipfr_t *fra, frag, *fran;
370 u_int idx, off;
371 frentry_t *fr;
372
373 if (softf->ipfr_stats.ifs_inuse >= softf->ipfr_size) {
374 FBUMPD(ifs_maximum);
375 return (NULL);
376 }
377
378 if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG) {
379 FBUMPD(ifs_newbad);
380 return (NULL);
381 }
382
383 if (pass & FR_FRSTRICT) {
384 if (fin->fin_off != 0) {
385 FBUMPD(ifs_newrestrictnot0);
386 return (NULL);
387 }
388 }
389
390 memset(&frag, 0, sizeof(frag));
391 frag.ipfr_v = fin->fin_v;
392 idx = fin->fin_v;
393 frag.ipfr_p = fin->fin_p;
394 idx += fin->fin_p;
395 frag.ipfr_id = fin->fin_id;
396 idx += fin->fin_id;
397 frag.ipfr_source = fin->fin_fi.fi_src;
398 idx += frag.ipfr_src.s_addr;
399 frag.ipfr_dest = fin->fin_fi.fi_dst;
400 idx += frag.ipfr_dst.s_addr;
401 frag.ipfr_ifp = fin->fin_ifp;
402 idx *= 127;
403 idx %= softf->ipfr_size;
404
405 frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
406 frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
407 frag.ipfr_auth = fin->fin_fi.fi_auth;
408
409 off = fin->fin_off >> 3;
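	/*
	 * off is now in units of 8 bytes, matching the IP fragment offset
	 * field.  For the first fragment, record where its data ends so
	 * that a later fragment overlapping that region can be flagged in
	 * ipf_frag_lookup.
	 */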
410 if (off == 0) {
411 char *ptr;
412 int end;
413
414 #ifdef USE_INET6
		if (fin->fin_v == 6) {
			ptr = (char *)fin->fin_fraghdr +
			    sizeof(struct ip6_frag);
419 } else
420 #endif
421 {
422 ptr = fin->fin_dp;
423 }
424 end = fin->fin_plen - (ptr - (char *)fin->fin_ip);
425 frag.ipfr_firstend = end >> 3;
426 } else {
427 frag.ipfr_firstend = 0;
428 }
429
430 /*
431 * allocate some memory, if possible, if not, just record that we
432 * failed to do so.
433 */
434 KMALLOC(fran, ipfr_t *);
435 if (fran == NULL) {
436 FBUMPD(ifs_nomem);
437 return (NULL);
438 }
439 memset(fran, 0, sizeof(*fran));
440
441 WRITE_ENTER(lock);
442
443 /*
444 * first, make sure it isn't already there...
445 */
446 for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
447 if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
448 IPFR_CMPSZ)) {
449 RWLOCK_EXIT(lock);
450 FBUMPD(ifs_exists);
451 KFREE(fran);
452 return (NULL);
453 }
454
455 fra = fran;
456 fran = NULL;
457 fr = fin->fin_fr;
458 fra->ipfr_rule = fr;
459 if (fr != NULL) {
460 MUTEX_ENTER(&fr->fr_lock);
461 fr->fr_ref++;
462 MUTEX_EXIT(&fr->fr_lock);
463 }
464
465 /*
466 * Insert the fragment into the fragment table, copy the struct used
467 * in the search using bcopy rather than reassign each field.
468 * Set the ttl to the default.
469 */
470 if ((fra->ipfr_hnext = table[idx]) != NULL)
471 table[idx]->ipfr_hprev = &fra->ipfr_hnext;
472 fra->ipfr_hprev = table + idx;
473 fra->ipfr_data = NULL;
474 table[idx] = fra;
475 bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
476 fra->ipfr_v = fin->fin_v;
477 fra->ipfr_p = fin->fin_p;
478 fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
479 fra->ipfr_firstend = frag.ipfr_firstend;
480
481 /*
482 * Compute the offset of the expected start of the next packet.
483 */
484 if (off == 0)
485 fra->ipfr_seen0 = 1;
486 fra->ipfr_off = off + (fin->fin_dlen >> 3);
487 fra->ipfr_pass = pass;
488 fra->ipfr_ref = 1;
489 fra->ipfr_pkts = 1;
490 fra->ipfr_bytes = fin->fin_plen;
491 FBUMP(ifs_inuse);
492 FBUMP(ifs_new);
493 return (fra);
494 }
495
496
497 /* ------------------------------------------------------------------------ */
498 /* Function: ipf_frag_new */
499 /* Returns: int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              fin(I)   - pointer to packet information                    */
/*              pass(I)  - filter result flags for this packet              */
501 /* */
502 /* Add a new entry to the fragment cache table based on the current packet */
503 /* ------------------------------------------------------------------------ */
504 int
505 ipf_frag_new(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass)
506 {
507 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
508 ipfr_t *fra;
509
510 if (softf->ipfr_lock != 0)
511 return (-1);
512
513 #ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads,
			    &softc->ipf_frag);
515 #else
516 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads);
517 #endif
518 if (fra != NULL) {
519 *softf->ipfr_tail = fra;
520 fra->ipfr_prev = softf->ipfr_tail;
521 softf->ipfr_tail = &fra->ipfr_next;
522 fra->ipfr_next = NULL;
523 RWLOCK_EXIT(&softc->ipf_frag);
524 }
525 return (fra ? 0 : -1);
526 }
527
528
529 /* ------------------------------------------------------------------------ */
530 /* Function: ipf_frag_natnew */
531 /* Returns: int - 0 == success, -1 == error */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              fin(I)   - pointer to packet information                    */
/*              pass(I)  - filter result flags for this packet              */
/*              nat(I)   - pointer to NAT structure                         */
534 /* */
535 /* Create a new NAT fragment cache entry based on the current packet and */
536 /* the NAT structure for this "session". */
537 /* ------------------------------------------------------------------------ */
538 int
539 ipf_frag_natnew(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass,
540 nat_t *nat)
541 {
542 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
543 ipfr_t *fra;
544
545 if (softf->ipfr_lock != 0)
546 return (0);
547
548 #ifdef USE_MUTEXES
549 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab,
550 &softf->ipfr_natfrag);
551 #else
552 fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab);
553 #endif
554 if (fra != NULL) {
555 fra->ipfr_data = nat;
556 nat->nat_data = fra;
557 *softf->ipfr_nattail = fra;
558 fra->ipfr_prev = softf->ipfr_nattail;
559 softf->ipfr_nattail = &fra->ipfr_next;
560 fra->ipfr_next = NULL;
561 RWLOCK_EXIT(&softf->ipfr_natfrag);
562 return (0);
563 }
564 return (-1);
565 }
566
567
568 /* ------------------------------------------------------------------------ */
569 /* Function: ipf_frag_ipidnew */
570 /* Returns: int - 0 == success, -1 == error */
571 /* Parameters: fin(I) - pointer to packet information */
572 /* ipid(I) - new IP ID for this fragmented packet */
573 /* */
574 /* Create a new fragment cache entry for this packet and store, as a data */
575 /* pointer, the new IP ID value. */
576 /* ------------------------------------------------------------------------ */
577 int
578 ipf_frag_ipidnew(fr_info_t *fin, u_32_t ipid)
579 {
580 ipf_main_softc_t *softc = fin->fin_main_soft;
581 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
582 ipfr_t *fra;
583
584 if (softf->ipfr_lock)
585 return (0);
586
587 #ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab,
			    &softf->ipfr_ipidfrag);
589 #else
590 fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab);
591 #endif
592 if (fra != NULL) {
593 fra->ipfr_data = (void *)(intptr_t)ipid;
594 *softf->ipfr_ipidtail = fra;
595 fra->ipfr_prev = softf->ipfr_ipidtail;
596 softf->ipfr_ipidtail = &fra->ipfr_next;
597 fra->ipfr_next = NULL;
598 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
599 }
600 return (fra ? 0 : -1);
601 }
602
603
604 /* ------------------------------------------------------------------------ */
605 /* Function: ipf_frag_lookup */
606 /* Returns: ipfr_t * - pointer to ipfr_t structure if there's a */
607 /* matching entry in the frag table, else NULL */
/* Parameters:  softc(I) - pointer to soft context main structure           */
/*              softf(I) - pointer to fragment context information          */
/*              fin(I)   - pointer to packet information                    */
/*              table(I) - pointer to fragment cache table to search        */
/*              lock(I)  - pointer to lock to get a read hold of            */
610 /* */
611 /* Check the fragment cache to see if there is already a record of this */
612 /* packet with its filter result known. */
613 /* */
/* If this function succeeds, it returns with a read lock held on "lock".    */
615 /* If it fails, no lock is held on return. */
616 /* ------------------------------------------------------------------------ */
617 static ipfr_t *
618 ipf_frag_lookup(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
619 fr_info_t *fin, ipfr_t *table[]
620 #ifdef USE_MUTEXES
621 , ipfrwlock_t *lock
622 #endif
623 )
624 {
625 ipfr_t *f, frag;
626 u_int idx;
627
628 /*
629 * We don't want to let short packets match because they could be
630 * compromising the security of other rules that want to match on
631 * layer 4 fields (and can't because they have been fragmented off.)
632 * Why do this check here? The counter acts as an indicator of this
633 * kind of attack, whereas if it was elsewhere, it wouldn't know if
634 * other matching packets had been seen.
635 */
636 if (fin->fin_flx & FI_SHORT) {
637 FBUMPD(ifs_short);
638 return (NULL);
639 }
640
641 if ((fin->fin_flx & FI_BAD) != 0) {
642 FBUMPD(ifs_bad);
643 return (NULL);
644 }
645
646 /*
647 * For fragments, we record protocol, packet id, TOS and both IP#'s
648 * (these should all be the same for all fragments of a packet).
649 *
650 * build up a hash value to index the table with.
651 */
652 memset(&frag, 0, sizeof(frag));
653 frag.ipfr_v = fin->fin_v;
654 idx = fin->fin_v;
655 frag.ipfr_p = fin->fin_p;
656 idx += fin->fin_p;
657 frag.ipfr_id = fin->fin_id;
658 idx += fin->fin_id;
659 frag.ipfr_source = fin->fin_fi.fi_src;
660 idx += frag.ipfr_src.s_addr;
661 frag.ipfr_dest = fin->fin_fi.fi_dst;
662 idx += frag.ipfr_dst.s_addr;
663 frag.ipfr_ifp = fin->fin_ifp;
664 idx *= 127;
665 idx %= softf->ipfr_size;
666
667 frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
668 frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
669 frag.ipfr_auth = fin->fin_fi.fi_auth;
670
671 READ_ENTER(lock);
672
673 /*
674 * check the table, careful to only compare the right amount of data
675 */
676 for (f = table[idx]; f; f = f->ipfr_hnext) {
677 if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
678 IPFR_CMPSZ)) {
679 u_short off;
680
681 /*
682 * XXX - We really need to be guarding against the
683 * retransmission of (src,dst,id,offset-range) here
684 * because a fragmented packet is never resent with
685 * the same IP ID# (or shouldn't).
686 */
687 off = fin->fin_off >> 3;
688 if (f->ipfr_seen0) {
689 if (off == 0) {
690 FBUMPD(ifs_retrans0);
691 continue;
692 }
693
694 /*
695 * Case 3. See comment for frpr_fragment6.
696 */
697 if ((f->ipfr_firstend != 0) &&
698 (off < f->ipfr_firstend)) {
699 FBUMP(ifs_overlap);
700 DT2(ifs_overlap, u_short, off,
701 ipfr_t *, f);
702 DT3(ipf_fi_bad_ifs_overlap, fr_info_t *, fin, u_short, off,
703 ipfr_t *, f);
704 fin->fin_flx |= FI_BAD;
705 break;
706 }
707 } else if (off == 0)
708 f->ipfr_seen0 = 1;
709
710 if (f != table[idx] && MUTEX_TRY_UPGRADE(lock)) {
711 ipfr_t **fp;
712
713 /*
714 * Move fragment info. to the top of the list
715 * to speed up searches. First, delink...
716 */
717 fp = f->ipfr_hprev;
718 (*fp) = f->ipfr_hnext;
719 if (f->ipfr_hnext != NULL)
720 f->ipfr_hnext->ipfr_hprev = fp;
721 /*
722 * Then put back at the top of the chain.
723 */
724 f->ipfr_hnext = table[idx];
725 table[idx]->ipfr_hprev = &f->ipfr_hnext;
726 f->ipfr_hprev = table + idx;
727 table[idx] = f;
728 MUTEX_DOWNGRADE(lock);
729 }
730
731 /*
732 * If we've follwed the fragments, and this is the
733 * last (in order), shrink expiration time.
734 */
735 if (off == f->ipfr_off) {
736 f->ipfr_off = (fin->fin_dlen >> 3) + off;
737
738 /*
739 * Well, we could shrink the expiration time
740 * but only if every fragment has been seen
741 * in order upto this, the last. ipfr_badorder
742 * is used here to count those out of order
743 * and if it equals 0 when we get to the last
744 * fragment then we can assume all of the
745 * fragments have been seen and in order.
746 */
747 #if 0
748 /*
749 * Doing this properly requires moving it to
750 * the head of the list which is infesible.
751 */
752 if ((more == 0) && (f->ipfr_badorder == 0))
753 f->ipfr_ttl = softc->ipf_ticks + 1;
754 #endif
755 } else {
756 f->ipfr_badorder++;
757 FBUMPD(ifs_unordered);
758 if (f->ipfr_pass & FR_FRSTRICT) {
759 FBUMPD(ifs_strict);
760 continue;
761 }
762 }
763 f->ipfr_pkts++;
764 f->ipfr_bytes += fin->fin_plen;
765 FBUMP(ifs_hits);
766 return (f);
767 }
768 }
769
770 RWLOCK_EXIT(lock);
771 FBUMP(ifs_miss);
772 return (NULL);
773 }
774
775
776 /* ------------------------------------------------------------------------ */
777 /* Function: ipf_frag_natknown */
778 /* Returns: nat_t* - pointer to 'parent' NAT structure if frag table */
779 /* match found, else NULL */
780 /* Parameters: fin(I) - pointer to packet information */
781 /* */
782 /* Functional interface for NAT lookups of the NAT fragment cache */
783 /* ------------------------------------------------------------------------ */
784 nat_t *
785 ipf_frag_natknown(fr_info_t *fin)
786 {
787 ipf_main_softc_t *softc = fin->fin_main_soft;
788 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
789 nat_t *nat;
790 ipfr_t *ipf;
791
792 if ((softf->ipfr_lock) || !softf->ipfr_natlist)
793 return (NULL);
794 #ifdef USE_MUTEXES
795 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab,
796 &softf->ipfr_natfrag);
797 #else
798 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab);
799 #endif
800 if (ipf != NULL) {
801 nat = ipf->ipfr_data;
802 /*
803 * This is the last fragment for this packet.
804 */
805 if ((ipf->ipfr_ttl == softc->ipf_ticks + 1) && (nat != NULL)) {
806 nat->nat_data = NULL;
807 ipf->ipfr_data = NULL;
808 }
809 RWLOCK_EXIT(&softf->ipfr_natfrag);
810 } else
811 nat = NULL;
812 return (nat);
813 }
814
815
816 /* ------------------------------------------------------------------------ */
817 /* Function: ipf_frag_ipidknown */
818 /* Returns: u_32_t - IPv4 ID for this packet if match found, else */
819 /* return 0xfffffff to indicate no match. */
820 /* Parameters: fin(I) - pointer to packet information */
821 /* */
822 /* Functional interface for IP ID lookups of the IP ID fragment cache */
823 /* ------------------------------------------------------------------------ */
824 u_32_t
825 ipf_frag_ipidknown(fr_info_t *fin)
826 {
827 ipf_main_softc_t *softc = fin->fin_main_soft;
828 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
829 ipfr_t *ipf;
830 u_32_t id;
831
832 if (softf->ipfr_lock || !softf->ipfr_ipidlist)
833 return (0xffffffff);
834
835 #ifdef USE_MUTEXES
836 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab,
837 &softf->ipfr_ipidfrag);
838 #else
839 ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab);
840 #endif
841 if (ipf != NULL) {
842 id = (u_32_t)(intptr_t)ipf->ipfr_data;
843 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
844 } else
845 id = 0xffffffff;
846 return (id);
847 }
848
849
850 /* ------------------------------------------------------------------------ */
851 /* Function: ipf_frag_known */
852 /* Returns: frentry_t* - pointer to filter rule if a match is found in */
853 /* the frag cache table, else NULL. */
854 /* Parameters: fin(I) - pointer to packet information */
855 /* passp(O) - pointer to where to store rule flags resturned */
856 /* */
857 /* Functional interface for normal lookups of the fragment cache. If a */
858 /* match is found, return the rule pointer and flags from the rule, except */
859 /* that if FR_LOGFIRST is set, reset FR_LOG. */
860 /* ------------------------------------------------------------------------ */
861 frentry_t *
862 ipf_frag_known(fr_info_t *fin, u_32_t *passp)
863 {
864 ipf_main_softc_t *softc = fin->fin_main_soft;
865 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
866 frentry_t *fr = NULL;
867 ipfr_t *fra;
868 u_32_t pass;
869
870 if ((softf->ipfr_lock) || (softf->ipfr_list == NULL))
871 return (NULL);
872
873 #ifdef USE_MUTEXES
874 fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads,
875 &softc->ipf_frag);
876 #else
877 fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads);
878 #endif
879 if (fra != NULL) {
880 if (fin->fin_flx & FI_BAD) {
881 fr = &ipfr_block;
882 fin->fin_reason = FRB_BADFRAG;
883 DT2(ipf_frb_badfrag, fr_info_t *, fin, uint, fra);
884 } else {
885 fr = fra->ipfr_rule;
886 }
887 fin->fin_fr = fr;
888 if (fr != NULL) {
889 pass = fr->fr_flags;
890 if ((pass & FR_KEEPSTATE) != 0) {
891 fin->fin_flx |= FI_STATE;
892 /*
893 * Reset the keep state flag here so that we
894 * don't try and add a new state entry because
895 * of a match here. That leads to blocking of
896 * the packet later because the add fails.
897 */
898 pass &= ~FR_KEEPSTATE;
899 }
900 if ((pass & FR_LOGFIRST) != 0)
901 pass &= ~(FR_LOGFIRST|FR_LOG);
902 *passp = pass;
903 }
904 RWLOCK_EXIT(&softc->ipf_frag);
905 }
906 return (fr);
907 }
908
909
910 /* ------------------------------------------------------------------------ */
911 /* Function: ipf_frag_natforget */
912 /* Returns: Nil */
913 /* Parameters: softc(I) - pointer to soft context main structure */
914 /* ptr(I) - pointer to data structure */
915 /* */
916 /* Search through all of the fragment cache entries for NAT and wherever a */
917 /* pointer is found to match ptr, reset it to NULL. */
918 /* ------------------------------------------------------------------------ */
919 void
920 ipf_frag_natforget(ipf_main_softc_t *softc, void *ptr)
921 {
922 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
923 ipfr_t *fr;
924
925 WRITE_ENTER(&softf->ipfr_natfrag);
926 for (fr = softf->ipfr_natlist; fr; fr = fr->ipfr_next)
927 if (fr->ipfr_data == ptr)
928 fr->ipfr_data = NULL;
929 RWLOCK_EXIT(&softf->ipfr_natfrag);
930 }
931
932
933 /* ------------------------------------------------------------------------ */
934 /* Function: ipf_frag_delete */
935 /* Returns: Nil */
936 /* Parameters: softc(I) - pointer to soft context main structure */
937 /* fra(I) - pointer to fragment structure to delete */
938 /* tail(IO) - pointer to the pointer to the tail of the frag */
939 /* list */
940 /* */
/* Remove a fragment cache table entry from the table & list.  Also free    */
/* the filter rule associated with it if it is no longer in use as a result */
/* of decreasing its reference count.                                        */
944 /* ------------------------------------------------------------------------ */
945 static void
946 ipf_frag_delete(ipf_main_softc_t *softc, ipfr_t *fra, ipfr_t ***tail)
947 {
948 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
949
950 if (fra->ipfr_next)
951 fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
952 *fra->ipfr_prev = fra->ipfr_next;
953 if (*tail == &fra->ipfr_next)
954 *tail = fra->ipfr_prev;
955
956 if (fra->ipfr_hnext)
957 fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
958 *fra->ipfr_hprev = fra->ipfr_hnext;
959
960 if (fra->ipfr_rule != NULL) {
961 (void) ipf_derefrule(softc, &fra->ipfr_rule);
962 }
963
964 if (fra->ipfr_ref <= 0)
965 ipf_frag_free(softf, fra);
966 }
967
968
969 /* ------------------------------------------------------------------------ */
970 /* Function: ipf_frag_free */
971 /* Returns: Nil */
972 /* Parameters: softf(I) - pointer to fragment context information */
973 /* fra(I) - pointer to fragment structure to free */
974 /* */
/* Free up a fragment cache entry and bump the relevant statistics.          */
976 /* ------------------------------------------------------------------------ */
977 static void
978 ipf_frag_free(ipf_frag_softc_t *softf, ipfr_t *fra)
979 {
980 KFREE(fra);
981 FBUMP(ifs_expire);
982 softf->ipfr_stats.ifs_inuse--;
983 }
984
985
986 /* ------------------------------------------------------------------------ */
987 /* Function: ipf_frag_clear */
988 /* Returns: Nil */
989 /* Parameters: softc(I) - pointer to soft context main structure */
990 /* */
/* Free all of the memory used to hold fragment state information.  Do the  */
/* normal fragment state entries first and then the NAT-fragment table.      */
993 /* ------------------------------------------------------------------------ */
994 void
995 ipf_frag_clear(ipf_main_softc_t *softc)
996 {
997 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
998 ipfr_t *fra;
999 nat_t *nat;
1000
1001 WRITE_ENTER(&softc->ipf_frag);
1002 while ((fra = softf->ipfr_list) != NULL) {
1003 fra->ipfr_ref--;
1004 ipf_frag_delete(softc, fra, &softf->ipfr_tail);
1005 }
1006 softf->ipfr_tail = &softf->ipfr_list;
1007 RWLOCK_EXIT(&softc->ipf_frag);
1008
1009 WRITE_ENTER(&softc->ipf_nat);
1010 WRITE_ENTER(&softf->ipfr_natfrag);
1011 while ((fra = softf->ipfr_natlist) != NULL) {
1012 nat = fra->ipfr_data;
1013 if (nat != NULL) {
1014 if (nat->nat_data == fra)
1015 nat->nat_data = NULL;
1016 }
1017 fra->ipfr_ref--;
1018 ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
1019 }
1020 softf->ipfr_nattail = &softf->ipfr_natlist;
1021 RWLOCK_EXIT(&softf->ipfr_natfrag);
1022 RWLOCK_EXIT(&softc->ipf_nat);
1023 }
1024
1025
1026 /* ------------------------------------------------------------------------ */
1027 /* Function: ipf_frag_expire */
1028 /* Returns: Nil */
1029 /* Parameters: softc(I) - pointer to soft context main structure */
1030 /* */
1031 /* Expire entries in the fragment cache table that have been there too long */
1032 /* ------------------------------------------------------------------------ */
1033 void
1034 ipf_frag_expire(ipf_main_softc_t *softc)
1035 {
1036 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1037 ipfr_t **fp, *fra;
1038 nat_t *nat;
1039 SPL_INT(s);
1040
1041 if (softf->ipfr_lock)
1042 return;
1043
1044 SPL_NET(s);
1045 WRITE_ENTER(&softc->ipf_frag);
1046 /*
1047 * Go through the entire table, looking for entries to expire,
1048 * which is indicated by the ttl being less than or equal to ipf_ticks.
1049 */
1050 for (fp = &softf->ipfr_list; ((fra = *fp) != NULL); ) {
1051 if (fra->ipfr_ttl > softc->ipf_ticks)
1052 break;
1053 fra->ipfr_ref--;
1054 ipf_frag_delete(softc, fra, &softf->ipfr_tail);
1055 }
1056 RWLOCK_EXIT(&softc->ipf_frag);
1057
1058 WRITE_ENTER(&softf->ipfr_ipidfrag);
1059 for (fp = &softf->ipfr_ipidlist; ((fra = *fp) != NULL); ) {
1060 if (fra->ipfr_ttl > softc->ipf_ticks)
1061 break;
1062 fra->ipfr_ref--;
1063 ipf_frag_delete(softc, fra, &softf->ipfr_ipidtail);
1064 }
1065 RWLOCK_EXIT(&softf->ipfr_ipidfrag);
1066
1067 /*
1068 * Same again for the NAT table, except that if the structure also
1069 * still points to a NAT structure, and the NAT structure points back
1070 * at the one to be free'd, NULL the reference from the NAT struct.
1071 * NOTE: We need to grab both mutex's early, and in this order so as
1072 * to prevent a deadlock if both try to expire at the same time.
1073 * The extra if() statement here is because it locks out all NAT
1074 * operations - no need to do that if there are no entries in this
1075 * list, right?
1076 */
1077 if (softf->ipfr_natlist != NULL) {
1078 WRITE_ENTER(&softc->ipf_nat);
1079 WRITE_ENTER(&softf->ipfr_natfrag);
1080 for (fp = &softf->ipfr_natlist; ((fra = *fp) != NULL); ) {
1081 if (fra->ipfr_ttl > softc->ipf_ticks)
1082 break;
1083 nat = fra->ipfr_data;
1084 if (nat != NULL) {
1085 if (nat->nat_data == fra)
1086 nat->nat_data = NULL;
1087 }
1088 fra->ipfr_ref--;
1089 ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
1090 }
1091 RWLOCK_EXIT(&softf->ipfr_natfrag);
1092 RWLOCK_EXIT(&softc->ipf_nat);
1093 }
1094 SPL_X(s);
1095 }
1096
1097
1098 /* ------------------------------------------------------------------------ */
1099 /* Function: ipf_frag_pkt_next */
1100 /* Returns: int - 0 == success, else error */
1101 /* Parameters: softc(I) - pointer to soft context main structure */
1102 /* token(I) - pointer to token information for this caller */
1103 /* itp(I) - pointer to generic iterator from caller */
1104 /* */
1105 /* This function is used to step through the fragment cache list used for */
1106 /* filter rules. The hard work is done by the more generic ipf_frag_next. */
1107 /* ------------------------------------------------------------------------ */
1108 int
1109 ipf_frag_pkt_next(ipf_main_softc_t *softc, ipftoken_t *token,
1110 ipfgeniter_t *itp)
1111 {
1112 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1113
1114 #ifdef USE_MUTEXES
1115 return (ipf_frag_next(softc, token, itp, &softf->ipfr_list,
1116 &softf->ipfr_frag));
1117 #else
1118 return (ipf_frag_next(softc, token, itp, &softf->ipfr_list));
1119 #endif
1120 }
1121
1122
1123 /* ------------------------------------------------------------------------ */
1124 /* Function: ipf_frag_nat_next */
1125 /* Returns: int - 0 == success, else error */
1126 /* Parameters: softc(I) - pointer to soft context main structure */
1127 /* token(I) - pointer to token information for this caller */
1128 /* itp(I) - pointer to generic iterator from caller */
1129 /* */
1130 /* This function is used to step through the fragment cache list used for */
1131 /* NAT. The hard work is done by the more generic ipf_frag_next. */
1132 /* ------------------------------------------------------------------------ */
1133 int
1134 ipf_frag_nat_next(ipf_main_softc_t *softc, ipftoken_t *token,
1135 ipfgeniter_t *itp)
1136 {
1137 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1138
1139 #ifdef USE_MUTEXES
1140 return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist,
1141 &softf->ipfr_natfrag));
1142 #else
1143 return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist));
1144 #endif
1145 }
1146
1147 /* ------------------------------------------------------------------------ */
1148 /* Function: ipf_frag_next */
1149 /* Returns: int - 0 == success, else error */
1150 /* Parameters: softc(I) - pointer to soft context main structure */
1151 /* token(I) - pointer to token information for this caller */
1152 /* itp(I) - pointer to generic iterator from caller */
1153 /* top(I) - top of the fragment list */
1154 /* lock(I) - fragment cache lock */
1155 /* */
/* This function is used to iterate through the list of entries in the       */
1157 /* fragment cache. It increases the reference count on the one currently */
1158 /* being returned so that the caller can come back and resume from it later.*/
1159 /* */
1160 /* This function is used for both the NAT fragment cache as well as the ipf */
1161 /* fragment cache - hence the reason for passing in top and lock. */
1162 /* ------------------------------------------------------------------------ */
1163 static int
1164 ipf_frag_next(ipf_main_softc_t *softc, ipftoken_t *token, ipfgeniter_t *itp,
1165 ipfr_t **top
1166 #ifdef USE_MUTEXES
1167 , ipfrwlock_t *lock
1168 #endif
1169 )
1170 {
1171 ipfr_t *frag, *next, zero;
1172 int error = 0;
1173
1174 if (itp->igi_data == NULL) {
1175 IPFERROR(20001);
1176 return (EFAULT);
1177 }
1178
1179 if (itp->igi_nitems != 1) {
1180 IPFERROR(20003);
1181 return (EFAULT);
1182 }
1183
1184 frag = token->ipt_data;
1185
1186 READ_ENTER(lock);
1187
1188 if (frag == NULL)
1189 next = *top;
1190 else
1191 next = frag->ipfr_next;
1192
1193 if (next != NULL) {
1194 ATOMIC_INC(next->ipfr_ref);
1195 token->ipt_data = next;
1196 } else {
1197 bzero(&zero, sizeof(zero));
1198 next = &zero;
1199 token->ipt_data = NULL;
1200 }
1201 if (next->ipfr_next == NULL)
1202 ipf_token_mark_complete(token);
1203
1204 RWLOCK_EXIT(lock);
1205
1206 error = COPYOUT(next, itp->igi_data, sizeof(*next));
1207 if (error != 0)
1208 IPFERROR(20002);
1209
1210 if (frag != NULL) {
1211 #ifdef USE_MUTEXES
1212 ipf_frag_deref(softc, &frag, lock);
1213 #else
1214 ipf_frag_deref(softc, &frag);
1215 #endif
1216 }
1217 return (error);
1218 }
1219
1220
1221 /* ------------------------------------------------------------------------ */
1222 /* Function: ipf_frag_pkt_deref */
1223 /* Returns: Nil */
1224 /* Parameters: softc(I) - pointer to soft context main structure */
1225 /* data(I) - pointer to frag cache pointer */
1226 /* */
1227 /* This function is the external interface for dropping a reference to a */
1228 /* fragment cache entry used by filter rules. */
1229 /* ------------------------------------------------------------------------ */
1230 void
1231 ipf_frag_pkt_deref(ipf_main_softc_t *softc, void *data)
1232 {
1233 ipfr_t **frp = data;
1234
1235 #ifdef USE_MUTEXES
1236 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1237
1238 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_frag);
1239 #else
1240 ipf_frag_deref(softc->ipf_frag_soft, frp);
1241 #endif
1242 }
1243
1244
1245 /* ------------------------------------------------------------------------ */
1246 /* Function: ipf_frag_nat_deref */
1247 /* Returns: Nil */
1248 /* Parameters: softc(I) - pointer to soft context main structure */
1249 /* data(I) - pointer to frag cache pointer */
1250 /* */
1251 /* This function is the external interface for dropping a reference to a */
1252 /* fragment cache entry used by NAT table entries. */
1253 /* ------------------------------------------------------------------------ */
1254 void
1255 ipf_frag_nat_deref(ipf_main_softc_t *softc, void *data)
1256 {
1257 ipfr_t **frp = data;
1258
1259 #ifdef USE_MUTEXES
1260 ipf_frag_softc_t *softf = softc->ipf_frag_soft;
1261
1262 ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_natfrag);
1263 #else
1264 ipf_frag_deref(softc->ipf_frag_soft, frp);
1265 #endif
1266 }
1267
1268
1269 /* ------------------------------------------------------------------------ */
1270 /* Function: ipf_frag_deref */
1271 /* Returns: Nil */
/* Parameters:  arg(I)  - pointer to fragment context information           */
/*              frp(IO) - pointer to fragment structure to dereference      */
/*              lock(I) - lock associated with the fragment                 */
1274 /* */
1275 /* This function dereferences a fragment structure (ipfr_t). The pointer */
1276 /* passed in will always be reset back to NULL, even if the structure is */
1277 /* not freed, to enforce the notion that the caller is no longer entitled */
1278 /* to use the pointer it is dropping the reference to. */
1279 /* ------------------------------------------------------------------------ */
1280 static void
1281 ipf_frag_deref(void *arg, ipfr_t **frp
1282 #ifdef USE_MUTEXES
1283 , ipfrwlock_t *lock
1284 #endif
1285 )
1286 {
1287 ipf_frag_softc_t *softf = arg;
1288 ipfr_t *fra;
1289
1290 fra = *frp;
1291 *frp = NULL;
1292
1293 WRITE_ENTER(lock);
1294 fra->ipfr_ref--;
1295 if (fra->ipfr_ref <= 0)
1296 ipf_frag_free(softf, fra);
1297 RWLOCK_EXIT(lock);
1298 }