FreeBSD/Linux Kernel Cross Reference
sys/net/pf_norm.c
1 /* $OpenBSD: pf_norm.c,v 1.226 2022/11/06 18:05:05 dlg Exp $ */
2
3 /*
4 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5 * Copyright 2009 Henning Brauer <henning@openbsd.org>
6 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include "pflog.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/filio.h>
36 #include <sys/fcntl.h>
37 #include <sys/socket.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/pool.h>
41 #include <sys/syslog.h>
42 #include <sys/mutex.h>
43
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_pflog.h>
47
48 #include <netinet/in.h>
49 #include <netinet/ip.h>
50 #include <netinet/ip_var.h>
51 #include <netinet/ip_icmp.h>
52 #include <netinet/tcp.h>
53 #include <netinet/tcp_seq.h>
54 #include <netinet/tcp_fsm.h>
55 #include <netinet/udp.h>
56
57 #ifdef INET6
58 #include <netinet6/in6_var.h>
59 #include <netinet/ip6.h>
60 #include <netinet6/ip6_var.h>
61 #include <netinet/icmp6.h>
62 #include <netinet6/nd6.h>
63 #endif /* INET6 */
64
65 #include <net/pfvar.h>
66 #include <net/pfvar_priv.h>
67
68 struct pf_frent {
69 TAILQ_ENTRY(pf_frent) fr_next;
70 struct mbuf *fe_m;
71 u_int16_t fe_hdrlen; /* ipv4 header length with ip options
72 ipv6, extension, fragment header */
73 u_int16_t fe_extoff; /* last extension header offset or 0 */
74 u_int16_t fe_len; /* fragment length */
75 u_int16_t fe_off; /* fragment offset */
76 u_int16_t fe_mff; /* more fragment flag */
77 };
78
79 RB_HEAD(pf_frag_tree, pf_fragment);
80 struct pf_frnode {
81 struct pf_addr fn_src; /* ip source address */
82 struct pf_addr fn_dst; /* ip destination address */
83 sa_family_t fn_af; /* address family */
84 u_int8_t fn_proto; /* protocol for fragments in fn_tree */
85 u_int8_t fn_direction; /* pf packet direction */
86 u_int32_t fn_fragments; /* number of entries in fn_tree */
87 u_int32_t fn_gen; /* fr_gen of newest entry in fn_tree */
88
89 RB_ENTRY(pf_frnode) fn_entry;
90 struct pf_frag_tree fn_tree; /* matching fragments, lookup by id */
91 };
92
93 struct pf_fragment {
94 struct pf_frent *fr_firstoff[PF_FRAG_ENTRY_POINTS];
95 /* pointers to queue element */
96 u_int8_t fr_entries[PF_FRAG_ENTRY_POINTS];
97 /* count entries between pointers */
98 RB_ENTRY(pf_fragment) fr_entry;
99 TAILQ_ENTRY(pf_fragment) frag_next;
100 TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
101 u_int32_t fr_id; /* fragment id for reassemble */
102 int32_t fr_timeout;
103 u_int32_t fr_gen; /* generation number (per pf_frnode) */
104 u_int16_t fr_maxlen; /* maximum length of single fragment */
105 u_int16_t fr_holes; /* number of holes in the queue */
106 struct pf_frnode *fr_node; /* ip src/dst/proto/af for fragments */
107 };
108
109 struct pf_fragment_tag {
110 u_int16_t ft_hdrlen; /* header length of reassembled pkt */
111 u_int16_t ft_extoff; /* last extension header offset or 0 */
112 u_int16_t ft_maxlen; /* maximum fragment payload length */
113 };
114
115 TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue;
116
117 static __inline int pf_frnode_compare(struct pf_frnode *,
118 struct pf_frnode *);
119 RB_HEAD(pf_frnode_tree, pf_frnode) pf_frnode_tree;
120 RB_PROTOTYPE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);
121 RB_GENERATE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);
122
123 static __inline int pf_frag_compare(struct pf_fragment *,
124 struct pf_fragment *);
125 RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
126 RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
127
128 /* Private prototypes */
129 void pf_flush_fragments(void);
130 void pf_free_fragment(struct pf_fragment *);
131 struct pf_fragment *pf_find_fragment(struct pf_frnode *, u_int32_t);
132 struct pf_frent *pf_create_fragment(u_short *);
133 int pf_frent_holes(struct pf_frent *);
134 static inline int pf_frent_index(struct pf_frent *);
135 int pf_frent_insert(struct pf_fragment *,
136 struct pf_frent *, struct pf_frent *);
137 void pf_frent_remove(struct pf_fragment *,
138 struct pf_frent *);
139 struct pf_frent *pf_frent_previous(struct pf_fragment *,
140 struct pf_frent *);
141 struct pf_fragment *pf_fillup_fragment(struct pf_frnode *, u_int32_t,
142 struct pf_frent *, u_short *);
143 struct mbuf *pf_join_fragment(struct pf_fragment *);
144 int pf_reassemble(struct mbuf **, int, u_short *);
145 #ifdef INET6
146 int pf_reassemble6(struct mbuf **, struct ip6_frag *,
147 u_int16_t, u_int16_t, int, u_short *);
148 #endif /* INET6 */
149
150 /* Globals */
151 struct pool pf_frent_pl, pf_frag_pl, pf_frnode_pl;
152 struct pool pf_state_scrub_pl;
153 int pf_nfrents;
154
155 struct mutex pf_frag_mtx;
156
157 #define PF_FRAG_LOCK_INIT() mtx_init(&pf_frag_mtx, IPL_SOFTNET)
158 #define PF_FRAG_LOCK() mtx_enter(&pf_frag_mtx)
159 #define PF_FRAG_UNLOCK() mtx_leave(&pf_frag_mtx)
160
161 void
162 pf_normalize_init(void)
163 {
164 pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0,
165 IPL_SOFTNET, 0, "pffrent", NULL);
166 pool_init(&pf_frnode_pl, sizeof(struct pf_frnode), 0,
167 IPL_SOFTNET, 0, "pffrnode", NULL);
168 pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0,
169 IPL_SOFTNET, 0, "pffrag", NULL);
170 pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0,
171 IPL_SOFTNET, 0, "pfstscr", NULL);
172
173 pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
174 pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
175
176 TAILQ_INIT(&pf_fragqueue);
177
178 PF_FRAG_LOCK_INIT();
179 }
180
181 static __inline int
182 pf_frnode_compare(struct pf_frnode *a, struct pf_frnode *b)
183 {
184 int diff;
185
186 if ((diff = a->fn_proto - b->fn_proto) != 0)
187 return (diff);
188 if ((diff = a->fn_af - b->fn_af) != 0)
189 return (diff);
190 if ((diff = pf_addr_compare(&a->fn_src, &b->fn_src, a->fn_af)) != 0)
191 return (diff);
192 if ((diff = pf_addr_compare(&a->fn_dst, &b->fn_dst, a->fn_af)) != 0)
193 return (diff);
194
195 return (0);
196 }
197
198 static __inline int
199 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
200 {
201 int diff;
202
203 if ((diff = a->fr_id - b->fr_id) != 0)
204 return (diff);
205
206 return (0);
207 }
208
209 void
210 pf_purge_expired_fragments(void)
211 {
212 struct pf_fragment *frag;
213 int32_t expire;
214
215 PF_ASSERT_UNLOCKED();
216
217 expire = getuptime() - pf_default_rule.timeout[PFTM_FRAG];
218
219 PF_FRAG_LOCK();
220 while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
221 if (frag->fr_timeout > expire)
222 break;
223 DPFPRINTF(LOG_NOTICE, "expiring %d(%p)", frag->fr_id, frag);
224 pf_free_fragment(frag);
225 }
226 PF_FRAG_UNLOCK();
227 }
228
229 /*
230 * Try to flush old fragments to make space for new ones
231 */
232 void
233 pf_flush_fragments(void)
234 {
235 struct pf_fragment *frag;
236 int goal;
237
238 goal = pf_nfrents * 9 / 10;
239 DPFPRINTF(LOG_NOTICE, "trying to free > %d frents", pf_nfrents - goal);
240 while (goal < pf_nfrents) {
241 if ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) == NULL)
242 break;
243 pf_free_fragment(frag);
244 }
245 }
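/*
 * Illustrative sketch: with pf_nfrents == 1000, the goal above computes
 * to 900, so pf_flush_fragments() frees the oldest pf_fragment objects
 * from the tail of pf_fragqueue until roughly 10% of the fragment
 * entries have been reclaimed, making room for new fragments.
 */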
246
247 /*
248 * Remove a fragment from the fragment queue, free its fragment entries,
249 * and free the fragment itself.
250 */
251 void
252 pf_free_fragment(struct pf_fragment *frag)
253 {
254 struct pf_frent *frent;
255 struct pf_frnode *frnode;
256
257 frnode = frag->fr_node;
258 RB_REMOVE(pf_frag_tree, &frnode->fn_tree, frag);
259 KASSERT(frnode->fn_fragments >= 1);
260 frnode->fn_fragments--;
261 if (frnode->fn_fragments == 0) {
262 KASSERT(RB_EMPTY(&frnode->fn_tree));
263 RB_REMOVE(pf_frnode_tree, &pf_frnode_tree, frnode);
264 pool_put(&pf_frnode_pl, frnode);
265 }
266 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
267
268 /* Free all fragment entries */
269 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
270 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
271 m_freem(frent->fe_m);
272 pool_put(&pf_frent_pl, frent);
273 pf_nfrents--;
274 }
275 pool_put(&pf_frag_pl, frag);
276 }
277
278 struct pf_fragment *
279 pf_find_fragment(struct pf_frnode *key, u_int32_t id)
280 {
281 struct pf_fragment *frag, idkey;
282 struct pf_frnode *frnode;
283 u_int32_t stale;
284
285 frnode = RB_FIND(pf_frnode_tree, &pf_frnode_tree, key);
286 if (frnode == NULL)
287 return (NULL);
288 KASSERT(frnode->fn_fragments >= 1);
289 idkey.fr_id = id;
290 frag = RB_FIND(pf_frag_tree, &frnode->fn_tree, &idkey);
291 if (frag == NULL)
292 return (NULL);
293 /*
294 * Limit the number of fragments we accept for each (proto,src,dst,af)
295 * combination (aka pf_frnode), so we can deal better with a high rate
296 * of fragments. Problem analysis is in RFC 4963.
297 * Store the current generation for each pf_frnode in fn_gen and on
298 * lookup discard 'stale' fragments (pf_fragment, based on the fr_gen
299          * member). Instead of adding another button, interpret the pf fragment
300 * timeout in multiples of 200 fragments. This way the default of 60s
301 * means: pf_fragment objects older than 60*200 = 12,000 generations
302 * are considered stale.
303 */
304 stale = pf_default_rule.timeout[PFTM_FRAG] * PF_FRAG_STALE;
305 if ((frnode->fn_gen - frag->fr_gen) >= stale) {
306 DPFPRINTF(LOG_NOTICE, "stale fragment %d(%p), gen %u, num %u",
307 frag->fr_id, frag, frag->fr_gen, frnode->fn_fragments);
308 pf_free_fragment(frag);
309 return (NULL);
310 }
311 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
312 TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
313
314 return (frag);
315 }
316
317 struct pf_frent *
318 pf_create_fragment(u_short *reason)
319 {
320 struct pf_frent *frent;
321
322 frent = pool_get(&pf_frent_pl, PR_NOWAIT);
323 if (frent == NULL) {
324 pf_flush_fragments();
325 frent = pool_get(&pf_frent_pl, PR_NOWAIT);
326 if (frent == NULL) {
327 REASON_SET(reason, PFRES_MEMORY);
328 return (NULL);
329 }
330 }
331 pf_nfrents++;
332
333 return (frent);
334 }
335
336 /*
337 * Calculate the additional holes that were created in the fragment
338 * queue by inserting this fragment. A fragment in the middle
339 * creates one more hole by splitting. For each connected side,
340 * it loses one hole.
341 * Fragment entry must be in the queue when calling this function.
342 */
343 int
344 pf_frent_holes(struct pf_frent *frent)
345 {
346 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
347 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
348 int holes = 1;
349
350 if (prev == NULL) {
351 if (frent->fe_off == 0)
352 holes--;
353 } else {
354 KASSERT(frent->fe_off != 0);
355 if (frent->fe_off == prev->fe_off + prev->fe_len)
356 holes--;
357 }
358 if (next == NULL) {
359 if (!frent->fe_mff)
360 holes--;
361 } else {
362 KASSERT(frent->fe_mff);
363 if (next->fe_off == frent->fe_off + frent->fe_len)
364 holes--;
365 }
366 return holes;
367 }
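/*
 * Worked example: suppose the queue holds [0,8) and [16,24), both with
 * fe_mff set, so there is one hole between them.  Inserting [8,16) with
 * fe_mff set starts at holes = 1, loses one hole because it attaches to
 * the end of [0,8), and loses another because [16,24) attaches to its
 * own end, for a net change of -1: the middle hole has been filled.
 */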
368
369 static inline int
370 pf_frent_index(struct pf_frent *frent)
371 {
372 /*
373 * We have an array of 16 entry points to the queue. A full size
374 * 65535 octet IP packet can have 8192 fragments. So the queue
375 * traversal length is at most 512 and at most 16 entry points are
376 * checked. We need 128 additional bytes on a 64 bit architecture.
377 */
378 CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
379 16 - 1);
380 CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);
381
382 return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
383 }
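/*
 * Illustrative sketch, assuming PF_FRAG_ENTRY_POINTS == 16: each entry
 * point covers 0x10000 / 16 == 0x1000 (4096) octets of offset space, so
 * a fragment with fe_off == 10000 lands in index 10000 / 4096 == 2.
 * The 16 fr_firstoff pointers account for the 128 additional bytes
 * (16 * 8) mentioned above for a 64 bit architecture.
 */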
384
385 int
386 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
387 struct pf_frent *prev)
388 {
389 CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
390 int index;
391
392 /*
393 * A packet has at most 65536 octets. With 16 entry points, each one
394          * spans 4096 octets. We limit these to 64 fragments each, which
395 * means on average every fragment must have at least 64 octets.
396 */
397 index = pf_frent_index(frent);
398 if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
399 return ENOBUFS;
400 frag->fr_entries[index]++;
401
402 if (prev == NULL) {
403 TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
404 } else {
405 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off);
406 TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
407 }
408
409 if (frag->fr_firstoff[index] == NULL) {
410 KASSERT(prev == NULL || pf_frent_index(prev) < index);
411 frag->fr_firstoff[index] = frent;
412 } else {
413 if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
414 KASSERT(prev == NULL || pf_frent_index(prev) < index);
415 frag->fr_firstoff[index] = frent;
416 } else {
417 KASSERT(prev != NULL);
418 KASSERT(pf_frent_index(prev) == index);
419 }
420 }
421
422 frag->fr_holes += pf_frent_holes(frent);
423
424 return 0;
425 }
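/*
 * Sketch of the limit in action: a flood of minimal 8-octet fragments
 * at offsets 0, 8, 16, ... all hash to entry point 0; once
 * fr_entries[0] reaches PF_FRAG_ENTRY_LIMIT (64 per the comment above),
 * the next such fragment is rejected with ENOBUFS instead of letting
 * the queue grow without bound.
 */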
426
427 void
428 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
429 {
430 #ifdef DIAGNOSTIC
431 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
432 #endif
433 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
434 int index;
435
436 frag->fr_holes -= pf_frent_holes(frent);
437
438 index = pf_frent_index(frent);
439 KASSERT(frag->fr_firstoff[index] != NULL);
440 if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
441 if (next == NULL) {
442 frag->fr_firstoff[index] = NULL;
443 } else {
444 KASSERT(frent->fe_off + frent->fe_len <= next->fe_off);
445 if (pf_frent_index(next) == index) {
446 frag->fr_firstoff[index] = next;
447 } else {
448 frag->fr_firstoff[index] = NULL;
449 }
450 }
451 } else {
452 KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off);
453 KASSERT(prev != NULL);
454 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off);
455 KASSERT(pf_frent_index(prev) == index);
456 }
457
458 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
459
460 KASSERT(frag->fr_entries[index] > 0);
461 frag->fr_entries[index]--;
462 }
463
464 struct pf_frent *
465 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
466 {
467 struct pf_frent *prev, *next;
468 int index;
469
470 /*
471          * If there are no fragments after frent, take the final one. Assume
472          * that the fragment queue is not empty.
473 */
474 prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
475 KASSERT(prev != NULL);
476 if (prev->fe_off <= frent->fe_off)
477 return prev;
478 /*
479          * We want to find a fragment entry that is before frent, but still
480          * close to it. Find the first fragment entry that is in the same
481          * entry point or in the first entry point after that. As we have
482          * already checked that there are entries behind frent, this will
483 * succeed.
484 */
485 for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
486 index++) {
487 prev = frag->fr_firstoff[index];
488 if (prev != NULL)
489 break;
490 }
491 KASSERT(prev != NULL);
492 /*
493 * In prev we may have a fragment from the same entry point that is
494 * before frent, or one that is just one position behind frent.
495 * In the latter case, we go back one step and have the predecessor.
496 * There may be none if the new fragment will be the first one.
497 */
498 if (prev->fe_off > frent->fe_off) {
499 prev = TAILQ_PREV(prev, pf_fragq, fr_next);
500 if (prev == NULL)
501 return NULL;
502 KASSERT(prev->fe_off <= frent->fe_off);
503 return prev;
504 }
505 /*
506          * prev now holds the first fragment of the entry point. The offset
507          * of frent is behind it. Find the closest previous fragment.
508 */
509 for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
510 next = TAILQ_NEXT(next, fr_next)) {
511 if (next->fe_off > frent->fe_off)
512 break;
513 prev = next;
514 }
515 return prev;
516 }
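/*
 * Worked example (octet offsets, PF_FRAG_ENTRY_POINTS == 16): with the
 * queue holding fragments at 0, 4096, 4160 and 12288, a new frent at
 * 8192 belongs to entry point 2.  fr_firstoff[2] is empty, so the scan
 * stops at fr_firstoff[3] == 12288, which lies after 8192; stepping
 * back one queue position yields 4160, the closest predecessor.
 */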
517
518 struct pf_fragment *
519 pf_fillup_fragment(struct pf_frnode *key, u_int32_t id,
520 struct pf_frent *frent, u_short *reason)
521 {
522 struct pf_frent *after, *next, *prev;
523 struct pf_fragment *frag;
524 struct pf_frnode *frnode;
525 u_int16_t total;
526
527 /* No empty fragments */
528 if (frent->fe_len == 0) {
529 DPFPRINTF(LOG_NOTICE, "bad fragment: len 0");
530 goto bad_fragment;
531 }
532
533 /* All fragments are 8 byte aligned */
534 if (frent->fe_mff && (frent->fe_len & 0x7)) {
535 DPFPRINTF(LOG_NOTICE, "bad fragment: mff and len %d",
536 frent->fe_len);
537 goto bad_fragment;
538 }
539
540 /* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET */
541 if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
542 DPFPRINTF(LOG_NOTICE, "bad fragment: max packet %d",
543 frent->fe_off + frent->fe_len);
544 goto bad_fragment;
545 }
546
547 DPFPRINTF(LOG_INFO, key->fn_af == AF_INET ?
548 "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
549 id, frent->fe_off, frent->fe_off + frent->fe_len);
550
551 /* Fully buffer all of the fragments in this fragment queue */
552 frag = pf_find_fragment(key, id);
553
554 /* Create a new reassembly queue for this packet */
555 if (frag == NULL) {
556 frag = pool_get(&pf_frag_pl, PR_NOWAIT);
557 if (frag == NULL) {
558 pf_flush_fragments();
559 frag = pool_get(&pf_frag_pl, PR_NOWAIT);
560 if (frag == NULL) {
561 REASON_SET(reason, PFRES_MEMORY);
562 goto drop_fragment;
563 }
564 }
565 frnode = RB_FIND(pf_frnode_tree, &pf_frnode_tree, key);
566 if (frnode == NULL) {
567 frnode = pool_get(&pf_frnode_pl, PR_NOWAIT);
568 if (frnode == NULL) {
569 pf_flush_fragments();
570 frnode = pool_get(&pf_frnode_pl, PR_NOWAIT);
571 if (frnode == NULL) {
572 REASON_SET(reason, PFRES_MEMORY);
573 pool_put(&pf_frag_pl, frag);
574 goto drop_fragment;
575 }
576 }
577 *frnode = *key;
578 RB_INIT(&frnode->fn_tree);
579 frnode->fn_fragments = 0;
580 frnode->fn_gen = 0;
581 }
582 memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
583 memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
584 TAILQ_INIT(&frag->fr_queue);
585 frag->fr_id = id;
586 frag->fr_timeout = getuptime();
587 frag->fr_gen = frnode->fn_gen++;
588 frag->fr_maxlen = frent->fe_len;
589 frag->fr_holes = 1;
590 frag->fr_node = frnode;
591 /* RB_INSERT cannot fail as pf_find_fragment() found nothing */
592 RB_INSERT(pf_frag_tree, &frnode->fn_tree, frag);
593 frnode->fn_fragments++;
594 if (frnode->fn_fragments == 1)
595 RB_INSERT(pf_frnode_tree, &pf_frnode_tree, frnode);
596 TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
597
598 /* We do not have a previous fragment, cannot fail. */
599 pf_frent_insert(frag, frent, NULL);
600
601 return (frag);
602 }
603
604 KASSERT(!TAILQ_EMPTY(&frag->fr_queue));
605 KASSERT(frag->fr_node);
606
607 /* Remember maximum fragment len for refragmentation */
608 if (frent->fe_len > frag->fr_maxlen)
609 frag->fr_maxlen = frent->fe_len;
610
611 /* Maximum data we have seen already */
612 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
613 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
614
615         /* Non-terminal fragments must have the more fragments flag */
616 if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
617 goto free_ipv6_fragment;
618
619 /* Check if we saw the last fragment already */
620 if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
621 if (frent->fe_off + frent->fe_len > total ||
622 (frent->fe_off + frent->fe_len == total && frent->fe_mff))
623 goto free_ipv6_fragment;
624 } else {
625 if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
626 goto free_ipv6_fragment;
627 }
628
629 /* Find neighbors for newly inserted fragment */
630 prev = pf_frent_previous(frag, frent);
631 if (prev == NULL) {
632 after = TAILQ_FIRST(&frag->fr_queue);
633 KASSERT(after != NULL);
634 } else {
635 after = TAILQ_NEXT(prev, fr_next);
636 }
637
638 if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
639 u_int16_t precut;
640
641 #ifdef INET6
642 if (frag->fr_node->fn_af == AF_INET6)
643 goto free_ipv6_fragment;
644 #endif /* INET6 */
645
646 precut = prev->fe_off + prev->fe_len - frent->fe_off;
647 if (precut >= frent->fe_len) {
648 DPFPRINTF(LOG_NOTICE, "new frag overlapped");
649 goto drop_fragment;
650 }
651 DPFPRINTF(LOG_NOTICE, "frag head overlap %d", precut);
652 m_adj(frent->fe_m, precut);
653 frent->fe_off += precut;
654 frent->fe_len -= precut;
655 }
656
657 for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
658 after = next) {
659 u_int16_t aftercut;
660
661 #ifdef INET6
662 if (frag->fr_node->fn_af == AF_INET6)
663 goto free_ipv6_fragment;
664 #endif /* INET6 */
665
666 aftercut = frent->fe_off + frent->fe_len - after->fe_off;
667 if (aftercut < after->fe_len) {
668 int old_index, new_index;
669
670 DPFPRINTF(LOG_NOTICE, "frag tail overlap %d", aftercut);
671 m_adj(after->fe_m, aftercut);
672 old_index = pf_frent_index(after);
673 after->fe_off += aftercut;
674 after->fe_len -= aftercut;
675 new_index = pf_frent_index(after);
676 if (old_index != new_index) {
677 DPFPRINTF(LOG_DEBUG, "frag index %d, new %d",
678 old_index, new_index);
679 /* Fragment switched queue as fe_off changed */
680 after->fe_off -= aftercut;
681 after->fe_len += aftercut;
682 /* Remove restored fragment from old queue */
683 pf_frent_remove(frag, after);
684 after->fe_off += aftercut;
685 after->fe_len -= aftercut;
686 /* Insert into correct queue */
687 if (pf_frent_insert(frag, after, prev)) {
688 DPFPRINTF(LOG_WARNING,
689 "fragment requeue limit exceeded");
690 m_freem(after->fe_m);
691 pool_put(&pf_frent_pl, after);
692 pf_nfrents--;
693                                         /* There is no way to recover */
694 goto free_fragment;
695 }
696 }
697 break;
698 }
699
700 /* This fragment is completely overlapped, lose it */
701 DPFPRINTF(LOG_NOTICE, "old frag overlapped");
702 next = TAILQ_NEXT(after, fr_next);
703 pf_frent_remove(frag, after);
704 m_freem(after->fe_m);
705 pool_put(&pf_frent_pl, after);
706 pf_nfrents--;
707 }
708
709         /* If part of the queue gets too long, there is no way to recover. */
710 if (pf_frent_insert(frag, frent, prev)) {
711 DPFPRINTF(LOG_WARNING, "fragment queue limit exceeded");
712 goto free_fragment;
713 }
714
715 return (frag);
716
717 free_ipv6_fragment:
718 if (frag->fr_node->fn_af == AF_INET)
719 goto bad_fragment;
720 /*
721 * RFC 5722, Errata 3089: When reassembling an IPv6 datagram, if one
722          * or more of its constituent fragments is determined to be an overlapping
723 * fragment, the entire datagram (and any constituent fragments) MUST
724 * be silently discarded.
725 */
726 DPFPRINTF(LOG_NOTICE, "flush overlapping fragments");
727 free_fragment:
728 pf_free_fragment(frag);
729 bad_fragment:
730 REASON_SET(reason, PFRES_FRAG);
731 drop_fragment:
732 pool_put(&pf_frent_pl, frent);
733 pf_nfrents--;
734 return (NULL);
735 }
736
737 struct mbuf *
738 pf_join_fragment(struct pf_fragment *frag)
739 {
740 struct mbuf *m, *m2;
741 struct pf_frent *frent;
742
743 frent = TAILQ_FIRST(&frag->fr_queue);
744 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
745
746 m = frent->fe_m;
747 /* Strip off any trailing bytes */
748 if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
749 m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
750 /* Magic from ip_input */
751 m2 = m->m_next;
752 m->m_next = NULL;
753 m_cat(m, m2);
754 pool_put(&pf_frent_pl, frent);
755 pf_nfrents--;
756
757 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
758 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
759 m2 = frent->fe_m;
760 /* Strip off ip header */
761 m_adj(m2, frent->fe_hdrlen);
762 /* Strip off any trailing bytes */
763 if (frent->fe_len < m2->m_pkthdr.len)
764 m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
765 pool_put(&pf_frent_pl, frent);
766 pf_nfrents--;
767 m_removehdr(m2);
768 m_cat(m, m2);
769 }
770
771 /* Remove from fragment queue */
772 pf_free_fragment(frag);
773
774 return (m);
775 }
776
777 int
778 pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
779 {
780 struct mbuf *m = *m0;
781 struct ip *ip = mtod(m, struct ip *);
782 struct pf_frent *frent;
783 struct pf_fragment *frag;
784 struct pf_frnode key;
785 u_int16_t total, hdrlen;
786
787 /* Get an entry for the fragment queue */
788 if ((frent = pf_create_fragment(reason)) == NULL)
789 return (PF_DROP);
790
791 frent->fe_m = m;
792 frent->fe_hdrlen = ip->ip_hl << 2;
793 frent->fe_extoff = 0;
794 frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
795 frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
796 frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
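        /*
         * Example: the second fragment of a large packet on a 1500 octet
         * MTU link arrives with a 20 byte header, ip_len == 1500 and
         * ip_off == (IP_MF | 185), so fe_len == 1480, fe_off == 185 << 3
         * == 1480 octets, and fe_mff is set.
         */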
797
798 key.fn_src.v4 = ip->ip_src;
799 key.fn_dst.v4 = ip->ip_dst;
800 key.fn_af = AF_INET;
801 key.fn_proto = ip->ip_p;
802 key.fn_direction = dir;
803
804 if ((frag = pf_fillup_fragment(&key, ip->ip_id, frent, reason))
805 == NULL)
806 return (PF_DROP);
807
808 /* The mbuf is part of the fragment entry, no direct free or access */
809 m = *m0 = NULL;
810
811 if (frag->fr_holes) {
812 DPFPRINTF(LOG_DEBUG, "frag %d, holes %d",
813 frag->fr_id, frag->fr_holes);
814 return (PF_PASS); /* drop because *m0 is NULL, no error */
815 }
816
817 /* We have all the data */
818 frent = TAILQ_FIRST(&frag->fr_queue);
819 KASSERT(frent != NULL);
820 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
821 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
822 hdrlen = frent->fe_hdrlen;
823 m = *m0 = pf_join_fragment(frag);
824 frag = NULL;
825 m_calchdrlen(m);
826
827 ip = mtod(m, struct ip *);
828 ip->ip_len = htons(hdrlen + total);
829 ip->ip_off &= ~(IP_MF|IP_OFFMASK);
830
831 if (hdrlen + total > IP_MAXPACKET) {
832 DPFPRINTF(LOG_NOTICE, "drop: too big: %d", total);
833 ip->ip_len = 0;
834 REASON_SET(reason, PFRES_SHORT);
835 /* PF_DROP requires a valid mbuf *m0 in pf_test() */
836 return (PF_DROP);
837 }
838
839 DPFPRINTF(LOG_INFO, "complete: %p(%d)", m, ntohs(ip->ip_len));
840 return (PF_PASS);
841 }
842
843 #ifdef INET6
844 int
845 pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
846 u_int16_t hdrlen, u_int16_t extoff, int dir, u_short *reason)
847 {
848 struct mbuf *m = *m0;
849 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
850 struct m_tag *mtag;
851 struct pf_fragment_tag *ftag;
852 struct pf_frent *frent;
853 struct pf_fragment *frag;
854 struct pf_frnode key;
855 int off;
856 u_int16_t total, maxlen;
857 u_int8_t proto;
858
859 /* Get an entry for the fragment queue */
860 if ((frent = pf_create_fragment(reason)) == NULL)
861 return (PF_DROP);
862
863 frent->fe_m = m;
864 frent->fe_hdrlen = hdrlen;
865 frent->fe_extoff = extoff;
866 frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
867 frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
868 frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
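        /*
         * The fragment header stores the offset in 8-octet units in the
         * upper 13 bits of ip6f_offlg, so masking with IP6F_OFF_MASK and
         * byte swapping yields the offset in octets directly; unlike the
         * IPv4 case above, no explicit "<< 3" is needed here.
         */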
869
870 key.fn_src.v6 = ip6->ip6_src;
871 key.fn_dst.v6 = ip6->ip6_dst;
872 key.fn_af = AF_INET6;
873 /* Only the first fragment's protocol is relevant */
874 key.fn_proto = 0;
875 key.fn_direction = dir;
876
877 if ((frag = pf_fillup_fragment(&key, fraghdr->ip6f_ident, frent,
878 reason)) == NULL)
879 return (PF_DROP);
880
881 /* The mbuf is part of the fragment entry, no direct free or access */
882 m = *m0 = NULL;
883
884 if (frag->fr_holes) {
885 DPFPRINTF(LOG_DEBUG, "frag %#08x, holes %d",
886 frag->fr_id, frag->fr_holes);
887 return (PF_PASS); /* drop because *m0 is NULL, no error */
888 }
889
890 /* We have all the data */
891 frent = TAILQ_FIRST(&frag->fr_queue);
892 KASSERT(frent != NULL);
893 extoff = frent->fe_extoff;
894 maxlen = frag->fr_maxlen;
895 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
896 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
897 hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
898 m = *m0 = pf_join_fragment(frag);
899 frag = NULL;
900
901 /* Take protocol from first fragment header */
902 if ((m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt),
903 &off)) == NULL)
904 panic("%s: short frag mbuf chain", __func__);
905 proto = *(mtod(m, caddr_t) + off);
906 m = *m0;
907
908 /* Delete frag6 header */
909 if (frag6_deletefraghdr(m, hdrlen) != 0)
910 goto fail;
911
912 m_calchdrlen(m);
913
914 if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED, sizeof(struct
915 pf_fragment_tag), M_NOWAIT)) == NULL)
916 goto fail;
917 ftag = (struct pf_fragment_tag *)(mtag + 1);
918 ftag->ft_hdrlen = hdrlen;
919 ftag->ft_extoff = extoff;
920 ftag->ft_maxlen = maxlen;
921 m_tag_prepend(m, mtag);
922
923 ip6 = mtod(m, struct ip6_hdr *);
924 ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
925 if (extoff) {
926 /* Write protocol into next field of last extension header */
927 if ((m = m_getptr(m, extoff + offsetof(struct ip6_ext,
928 ip6e_nxt), &off)) == NULL)
929 panic("%s: short ext mbuf chain", __func__);
930 *(mtod(m, caddr_t) + off) = proto;
931 m = *m0;
932 } else
933 ip6->ip6_nxt = proto;
934
935 if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
936 DPFPRINTF(LOG_NOTICE, "drop: too big: %d", total);
937 ip6->ip6_plen = 0;
938 REASON_SET(reason, PFRES_SHORT);
939 /* PF_DROP requires a valid mbuf *m0 in pf_test6() */
940 return (PF_DROP);
941 }
942
943 DPFPRINTF(LOG_INFO, "complete: %p(%d)", m, ntohs(ip6->ip6_plen));
944 return (PF_PASS);
945
946 fail:
947 REASON_SET(reason, PFRES_MEMORY);
948 /* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later */
949 return (PF_DROP);
950 }
951
952 int
953 pf_refragment6(struct mbuf **m0, struct m_tag *mtag, struct sockaddr_in6 *dst,
954 struct ifnet *ifp, struct rtentry *rt)
955 {
956 struct mbuf *m = *m0;
957 struct mbuf_list fml;
958 struct pf_fragment_tag *ftag = (struct pf_fragment_tag *)(mtag + 1);
959 u_int32_t mtu;
960 u_int16_t hdrlen, extoff, maxlen;
961 u_int8_t proto;
962 int error;
963
964 hdrlen = ftag->ft_hdrlen;
965 extoff = ftag->ft_extoff;
966 maxlen = ftag->ft_maxlen;
967 m_tag_delete(m, mtag);
968 mtag = NULL;
969 ftag = NULL;
970
971 /* Checksum must be calculated for the whole packet */
972 in6_proto_cksum_out(m, NULL);
973
974 if (extoff) {
975 int off;
976
977 /* Use protocol from next field of last extension header */
978 if ((m = m_getptr(m, extoff + offsetof(struct ip6_ext,
979 ip6e_nxt), &off)) == NULL)
980 panic("%s: short ext mbuf chain", __func__);
981 proto = *(mtod(m, caddr_t) + off);
982 *(mtod(m, caddr_t) + off) = IPPROTO_FRAGMENT;
983 m = *m0;
984 } else {
985 struct ip6_hdr *hdr;
986
987 hdr = mtod(m, struct ip6_hdr *);
988 proto = hdr->ip6_nxt;
989 hdr->ip6_nxt = IPPROTO_FRAGMENT;
990 }
991
992 /*
993 * Maxlen may be less than 8 iff there was only a single
994 * fragment. As it was fragmented before, add a fragment
995 * header also for a single fragment. If total or maxlen
996 * is less than 8, ip6_fragment() will return EMSGSIZE and
997 * we drop the packet.
998 */
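        /*
         * Sketch with assumed values: if the largest original fragment
         * carried maxlen == 1432 payload octets behind a 40 byte IPv6
         * header, the mtu below becomes 40 + 8 + 1432 == 1480, and
         * ip6_fragment() emits fragments that again carry at most 1432
         * octets after their fragment header.
         */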
999 mtu = hdrlen + sizeof(struct ip6_frag) + maxlen;
1000 error = ip6_fragment(m, &fml, hdrlen, proto, mtu);
1001 *m0 = NULL; /* ip6_fragment() has consumed original packet. */
1002 if (error) {
1003 DPFPRINTF(LOG_NOTICE, "refragment error %d", error);
1004 return (PF_DROP);
1005 }
1006
1007 while ((m = ml_dequeue(&fml)) != NULL) {
1008 m->m_pkthdr.pf.flags |= PF_TAG_REFRAGMENTED;
1009 if (ifp == NULL) {
1010 ip6_forward(m, NULL, 0);
1011 } else if ((u_long)m->m_pkthdr.len <= ifp->if_mtu) {
1012 ifp->if_output(ifp, m, sin6tosa(dst), rt);
1013 } else {
1014 icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
1015 }
1016 }
1017
1018 return (PF_PASS);
1019 }
1020 #endif /* INET6 */
1021
1022 int
1023 pf_normalize_ip(struct pf_pdesc *pd, u_short *reason)
1024 {
1025 struct ip *h = mtod(pd->m, struct ip *);
1026 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1027 u_int16_t mff = (ntohs(h->ip_off) & IP_MF);
1028
1029 if (!fragoff && !mff)
1030 goto no_fragment;
1031
1032 /* Clear IP_DF if we're in no-df mode */
1033 if (pf_status.reass & PF_REASS_NODF && h->ip_off & htons(IP_DF))
1034 h->ip_off &= htons(~IP_DF);
1035
1036 /* We're dealing with a fragment now. Don't allow fragments
1037 * with IP_DF to enter the cache. If the flag was cleared by
1038 * no-df above, fine. Otherwise drop it.
1039 */
1040 if (h->ip_off & htons(IP_DF)) {
1041 DPFPRINTF(LOG_NOTICE, "bad fragment: IP_DF");
1042 REASON_SET(reason, PFRES_FRAG);
1043 return (PF_DROP);
1044 }
1045
1046 if (!pf_status.reass)
1047 return (PF_PASS); /* no reassembly */
1048
1049 /* Returns PF_DROP or m is NULL or completely reassembled mbuf */
1050 PF_FRAG_LOCK();
1051 if (pf_reassemble(&pd->m, pd->dir, reason) != PF_PASS) {
1052 PF_FRAG_UNLOCK();
1053 return (PF_DROP);
1054 }
1055 PF_FRAG_UNLOCK();
1056 if (pd->m == NULL)
1057 return (PF_PASS); /* packet has been reassembled, no error */
1058
1059 h = mtod(pd->m, struct ip *);
1060
1061 no_fragment:
1062 /* At this point, only IP_DF is allowed in ip_off */
1063 if (h->ip_off & ~htons(IP_DF))
1064 h->ip_off &= htons(IP_DF);
1065
1066 return (PF_PASS);
1067 }
1068
1069 #ifdef INET6
1070 int
1071 pf_normalize_ip6(struct pf_pdesc *pd, u_short *reason)
1072 {
1073 struct ip6_frag frag;
1074
1075 if (pd->fragoff == 0)
1076 goto no_fragment;
1077
1078 if (!pf_pull_hdr(pd->m, pd->fragoff, &frag, sizeof(frag), NULL, reason,
1079 AF_INET6))
1080 return (PF_DROP);
1081
1082 if (!pf_status.reass)
1083 return (PF_PASS); /* no reassembly */
1084
1085 /* Returns PF_DROP or m is NULL or completely reassembled mbuf */
1086 PF_FRAG_LOCK();
1087 if (pf_reassemble6(&pd->m, &frag, pd->fragoff + sizeof(frag),
1088 pd->extoff, pd->dir, reason) != PF_PASS) {
1089 PF_FRAG_UNLOCK();
1090 return (PF_DROP);
1091 }
1092 PF_FRAG_UNLOCK();
1093 if (pd->m == NULL)
1094 return (PF_PASS); /* packet has been reassembled, no error */
1095
1096 no_fragment:
1097 return (PF_PASS);
1098 }
1099 #endif /* INET6 */
1100
1101 int
1102 pf_normalize_tcp_alloc(struct pf_state_peer *src)
1103 {
1104 src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
1105 if (src->scrub == NULL)
1106 return (ENOMEM);
1107
1108 return (0);
1109 }
1110
1111 int
1112 pf_normalize_tcp(struct pf_pdesc *pd)
1113 {
1114 struct tcphdr *th = &pd->hdr.tcp;
1115 u_short reason;
1116 u_int8_t flags;
1117 u_int rewrite = 0;
1118
1119 flags = th->th_flags;
1120 if (flags & TH_SYN) {
1121 /* Illegal packet */
1122 if (flags & TH_RST)
1123 goto tcp_drop;
1124
1125 if (flags & TH_FIN) /* XXX why clear instead of drop? */
1126 flags &= ~TH_FIN;
1127 } else {
1128 /* Illegal packet */
1129 if (!(flags & (TH_ACK|TH_RST)))
1130 goto tcp_drop;
1131 }
1132
1133 if (!(flags & TH_ACK)) {
1134 /* These flags are only valid if ACK is set */
1135 if (flags & (TH_FIN|TH_PUSH|TH_URG))
1136 goto tcp_drop;
1137 }
1138
1139 /* If flags changed, or reserved data set, then adjust */
1140 if (flags != th->th_flags || th->th_x2 != 0) {
1141 /* hack: set 4-bit th_x2 = 0 */
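                /* The byte after th_ack holds th_off in its high nibble
                 * and th_x2 in its low nibble, so writing th_off << 4
                 * clears the reserved bits.  PF_HI marks the byte as the
                 * high octet of its 16 bit checksum word (offset 12 is
                 * even); th_flags at odd offset 13 is patched with PF_LO. */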
1142 u_int8_t *th_off = (u_int8_t*)(&th->th_ack+1);
1143 pf_patch_8(pd, th_off, th->th_off << 4, PF_HI);
1144
1145 pf_patch_8(pd, &th->th_flags, flags, PF_LO);
1146 rewrite = 1;
1147 }
1148
1149 /* Remove urgent pointer, if TH_URG is not set */
1150 if (!(flags & TH_URG) && th->th_urp) {
1151 pf_patch_16(pd, &th->th_urp, 0);
1152 rewrite = 1;
1153 }
1154
1155 /* copy back packet headers if we sanitized */
1156 if (rewrite) {
1157 m_copyback(pd->m, pd->off, sizeof(*th), th, M_NOWAIT);
1158 }
1159
1160 return (PF_PASS);
1161
1162 tcp_drop:
1163 REASON_SET(&reason, PFRES_NORM);
1164 return (PF_DROP);
1165 }
1166
1167 int
1168 pf_normalize_tcp_init(struct pf_pdesc *pd, struct pf_state_peer *src)
1169 {
1170 struct tcphdr *th = &pd->hdr.tcp;
1171 u_int32_t tsval, tsecr;
1172 int olen;
1173 u_int8_t opts[MAX_TCPOPTLEN], *opt;
1174
1175
1176 KASSERT(src->scrub == NULL);
1177
1178 if (pf_normalize_tcp_alloc(src) != 0)
1179 return (1);
1180
1181 switch (pd->af) {
1182 case AF_INET: {
1183 struct ip *h = mtod(pd->m, struct ip *);
1184 src->scrub->pfss_ttl = h->ip_ttl;
1185 break;
1186 }
1187 #ifdef INET6
1188 case AF_INET6: {
1189 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1190 src->scrub->pfss_ttl = h->ip6_hlim;
1191 break;
1192 }
1193 #endif /* INET6 */
1194 default:
1195 unhandled_af(pd->af);
1196 }
1197
1198 /*
1199 * All normalizations below are only begun if we see the start of
1200          * the connection. They must all set an enabled bit in pfss_flags
1201 */
1202 if ((th->th_flags & TH_SYN) == 0)
1203 return (0);
1204
1205 olen = (th->th_off << 2) - sizeof(*th);
1206 if (olen < TCPOLEN_TIMESTAMP || !pf_pull_hdr(pd->m,
1207 pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af))
1208 return (0);
1209
1210 opt = opts;
1211 while ((opt = pf_find_tcpopt(opt, opts, olen,
1212 TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
1213
1214 src->scrub->pfss_flags |= PFSS_TIMESTAMP;
1215 src->scrub->pfss_ts_mod = arc4random();
1216 /* note PFSS_PAWS not set yet */
1217 memcpy(&tsval, &opt[2], sizeof(u_int32_t));
1218 memcpy(&tsecr, &opt[6], sizeof(u_int32_t));
1219 src->scrub->pfss_tsval0 = ntohl(tsval);
1220 src->scrub->pfss_tsval = ntohl(tsval);
1221 src->scrub->pfss_tsecr = ntohl(tsecr);
1222 getmicrouptime(&src->scrub->pfss_last);
1223
1224 opt += opt[1];
1225 }
1226
1227 return (0);
1228 }
1229
1230 void
1231 pf_normalize_tcp_cleanup(struct pf_state *state)
1232 {
1233 if (state->src.scrub)
1234 pool_put(&pf_state_scrub_pl, state->src.scrub);
1235 if (state->dst.scrub)
1236 pool_put(&pf_state_scrub_pl, state->dst.scrub);
1237
1238 /* Someday... flush the TCP segment reassembly descriptors. */
1239 }
1240
1241 int
1242 pf_normalize_tcp_stateful(struct pf_pdesc *pd, u_short *reason,
1243 struct pf_state *state, struct pf_state_peer *src,
1244 struct pf_state_peer *dst, int *writeback)
1245 {
1246 struct tcphdr *th = &pd->hdr.tcp;
1247 struct timeval uptime;
1248 u_int tsval_from_last;
1249 u_int32_t tsval, tsecr;
1250 int copyback = 0;
1251 int got_ts = 0;
1252 int olen;
1253 u_int8_t opts[MAX_TCPOPTLEN], *opt;
1254
1255 KASSERT(src->scrub || dst->scrub);
1256
1257 /*
1258 * Enforce the minimum TTL seen for this connection. Negate a common
1259 * technique to evade an intrusion detection system and confuse
1260 * firewall state code.
1261 */
1262 switch (pd->af) {
1263 case AF_INET:
1264 if (src->scrub) {
1265 struct ip *h = mtod(pd->m, struct ip *);
1266 if (h->ip_ttl > src->scrub->pfss_ttl)
1267 src->scrub->pfss_ttl = h->ip_ttl;
1268 h->ip_ttl = src->scrub->pfss_ttl;
1269 }
1270 break;
1271 #ifdef INET6
1272 case AF_INET6:
1273 if (src->scrub) {
1274 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1275 if (h->ip6_hlim > src->scrub->pfss_ttl)
1276 src->scrub->pfss_ttl = h->ip6_hlim;
1277 h->ip6_hlim = src->scrub->pfss_ttl;
1278 }
1279 break;
1280 #endif /* INET6 */
1281 default:
1282 unhandled_af(pd->af);
1283 }
1284
1285 olen = (th->th_off << 2) - sizeof(*th);
1286
1287 if (olen >= TCPOLEN_TIMESTAMP &&
1288 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1289 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1290 pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, NULL,
1291 pd->af)) {
1292
1293 /* Modulate the timestamps. Can be used for NAT detection, OS
1294 * uptime determination or reboot detection.
1295 */
1296 opt = opts;
1297 while ((opt = pf_find_tcpopt(opt, opts, olen,
1298 TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
1299
1300 u_int8_t *ts = opt + 2;
1301 u_int8_t *tsr = opt + 6;
1302
1303 if (got_ts) {
1304 /* Huh? Multiple timestamps!? */
1305 if (pf_status.debug >= LOG_NOTICE) {
1306 log(LOG_NOTICE,
1307 "pf: %s: multiple TS??", __func__);
1308 pf_print_state(state);
1309 addlog("\n");
1310 }
1311 REASON_SET(reason, PFRES_TS);
1312 return (PF_DROP);
1313 }
1314
1315 memcpy(&tsval, ts, sizeof(u_int32_t));
1316 memcpy(&tsecr, tsr, sizeof(u_int32_t));
1317
1318 /* modulate TS */
1319 if (tsval && src->scrub &&
1320 (src->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1321 /* tsval used further on */
1322 tsval = ntohl(tsval);
1323 pf_patch_32_unaligned(pd,
1324 ts, htonl(tsval + src->scrub->pfss_ts_mod),
1325 PF_ALGNMNT(ts - opts));
1326 copyback = 1;
1327 }
1328
1329 /* modulate TS reply if any (!0) */
1330 if (tsecr && dst->scrub &&
1331 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1332 /* tsecr used further on */
1333 tsecr = ntohl(tsecr) - dst->scrub->pfss_ts_mod;
1334 pf_patch_32_unaligned(pd,
1335 tsr, htonl(tsecr), PF_ALGNMNT(tsr - opts));
1336 copyback = 1;
1337 }
1338
1339 got_ts = 1;
1340 opt += opt[1];
1341 }
1342
1343 if (copyback) {
1344 /* Copyback the options, caller copies back header */
1345 *writeback = 1;
1346 m_copyback(pd->m, pd->off + sizeof(*th), olen, opts, M_NOWAIT);
1347 }
1348 }
1349
1350
1351 /*
1352 * Must invalidate PAWS checks on connections idle for too long.
1353 * The fastest allowed timestamp clock is 1ms. That turns out to
1354 * be about 24 days before it wraps. XXX Right now our lowerbound
1355 * TS echo check only works for the first 12 days of a connection
1356 * when the TS has exhausted half its 32bit space
1357 */
1358 #define TS_MAX_IDLE (24*24*60*60)
1359 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */
1360
1361 getmicrouptime(&uptime);
1362 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1363 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1364 getuptime() - state->creation > TS_MAX_CONN)) {
1365 if (pf_status.debug >= LOG_NOTICE) {
1366 log(LOG_NOTICE, "pf: src idled out of PAWS ");
1367 pf_print_state(state);
1368 addlog("\n");
1369 }
1370 src->scrub->pfss_flags =
1371 (src->scrub->pfss_flags & ~PFSS_PAWS) | PFSS_PAWS_IDLED;
1372 }
1373 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1374 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1375 if (pf_status.debug >= LOG_NOTICE) {
1376 log(LOG_NOTICE, "pf: dst idled out of PAWS ");
1377 pf_print_state(state);
1378 addlog("\n");
1379 }
1380 dst->scrub->pfss_flags =
1381 (dst->scrub->pfss_flags & ~PFSS_PAWS) | PFSS_PAWS_IDLED;
1382 }
1383
1384 if (got_ts && src->scrub && dst->scrub &&
1385 (src->scrub->pfss_flags & PFSS_PAWS) &&
1386 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1387 /* Validate that the timestamps are "in-window".
1388 * RFC1323 describes TCP Timestamp options that allow
1389 * measurement of RTT (round trip time) and PAWS
1390 * (protection against wrapped sequence numbers). PAWS
1391 * gives us a set of rules for rejecting packets on
1392 * long fat pipes (packets that were somehow delayed
1393 * in transit longer than the time it took to send the
1394 * full TCP sequence space of 4Gb). We can use these
1395 * rules and infer a few others that will let us treat
1396 * the 32bit timestamp and the 32bit echoed timestamp
1397 * as sequence numbers to prevent a blind attacker from
1398 * inserting packets into a connection.
1399 *
1400 * RFC1323 tells us:
1401 * - The timestamp on this packet must be greater than
1402 * or equal to the last value echoed by the other
1403 * endpoint. The RFC says those will be discarded
1404 * since it is a dup that has already been acked.
1405 * This gives us a lowerbound on the timestamp.
1406 * timestamp >= other last echoed timestamp
1407 * - The timestamp will be less than or equal to
1408 * the last timestamp plus the time between the
1409 * last packet and now. The RFC defines the max
1410 * clock rate as 1ms. We will allow clocks to be
1411 * up to 10% fast and will allow a total difference
1412                  * of 30 seconds due to a route change. And this
1413 * gives us an upperbound on the timestamp.
1414 * timestamp <= last timestamp + max ticks
1415 * We have to be careful here. Windows will send an
1416 * initial timestamp of zero and then initialize it
1417 * to a random value after the 3whs; presumably to
1418 * avoid a DoS by having to call an expensive RNG
1419 * during a SYN flood. Proof MS has at least one
1420 * good security geek.
1421 *
1422 * - The TCP timestamp option must also echo the other
1423 * endpoints timestamp. The timestamp echoed is the
1424 * one carried on the earliest unacknowledged segment
1425 * on the left edge of the sequence window. The RFC
1426 * states that the host will reject any echoed
1427 * timestamps that were larger than any ever sent.
1428 * This gives us an upperbound on the TS echo.
1429                  * tsecr <= largest_tsval
1430 * - The lowerbound on the TS echo is a little more
1431 * tricky to determine. The other endpoint's echoed
1432 * values will not decrease. But there may be
1433 * network conditions that re-order packets and
1434 * cause our view of them to decrease. For now the
1435 * only lowerbound we can safely determine is that
1436 * the TS echo will never be less than the original
1437 * TS. XXX There is probably a better lowerbound.
1438 * Remove TS_MAX_CONN with better lowerbound check.
1439                  * tsecr >= other original TS
1440 *
1441 * It is also important to note that the fastest
1442 * timestamp clock of 1ms will wrap its 32bit space in
1443 * 24 days. So we just disable TS checking after 24
1444 * days of idle time. We actually must use a 12d
1445 * connection limit until we can come up with a better
1446 * lowerbound to the TS echo check.
1447 */
1448 struct timeval delta_ts;
1449 int ts_fudge;
1450
1451 /*
1452 * PFTM_TS_DIFF is how many seconds of leeway to allow
1453 * a host's timestamp. This can happen if the previous
1454 * packet got delayed in transit for much longer than
1455 * this packet.
1456 */
1457 if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
1458 ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF];
1459
1460 /* Calculate max ticks since the last timestamp */
1461 #define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */
1462 #define TS_MICROSECS 1000000 /* microseconds per second */
1463 timersub(&uptime, &src->scrub->pfss_last, &delta_ts);
1464 tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1465 tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
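                /*
                 * Worked example, assuming the default 30 second
                 * PFTM_TS_DIFF fudge and a connection idle for 2.5
                 * seconds: (2 + 30) * 1100 == 35200 ticks, plus
                 * 500000 / (1000000 / 1100) == 550 for the fractional
                 * second, lets the peer's timestamp advance by at most
                 * roughly 35750 ticks before the PAWS check fires.
                 */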
1466
1467 if ((src->state >= TCPS_ESTABLISHED &&
1468 dst->state >= TCPS_ESTABLISHED) &&
1469 (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1470 SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1471 (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1472 SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1473 /* Bad RFC1323 implementation or an insertion attack.
1474 *
1475 * - Solaris 2.6 and 2.7 are known to send another ACK
1476 * after the FIN,FIN|ACK,ACK closing that carries
1477 * an old timestamp.
1478 */
1479
1480 DPFPRINTF(LOG_NOTICE, "Timestamp failed %c%c%c%c",
1481                             SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1482 SEQ_GT(tsval, src->scrub->pfss_tsval +
1483 tsval_from_last) ? '1' : ' ',
1484 SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1485 SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' ');
1486 DPFPRINTF(LOG_NOTICE, " tsval: %u tsecr: %u "
1487 "+ticks: %u idle: %llu.%06lus", tsval, tsecr,
1488 tsval_from_last, (long long)delta_ts.tv_sec,
1489 delta_ts.tv_usec);
1490 DPFPRINTF(LOG_NOTICE, " src->tsval: %u tsecr: %u",
1491 src->scrub->pfss_tsval, src->scrub->pfss_tsecr);
1492 DPFPRINTF(LOG_NOTICE, " dst->tsval: %u tsecr: %u "
1493 "tsval0: %u", dst->scrub->pfss_tsval,
1494 dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0);
1495 if (pf_status.debug >= LOG_NOTICE) {
1496 log(LOG_NOTICE, "pf: ");
1497 pf_print_state(state);
1498 pf_print_flags(th->th_flags);
1499 addlog("\n");
1500 }
1501 REASON_SET(reason, PFRES_TS);
1502 return (PF_DROP);
1503 }
1504 /* XXX I'd really like to require tsecr but it's optional */
1505 } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
1506 ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1507 || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
1508 src->scrub && dst->scrub &&
1509 (src->scrub->pfss_flags & PFSS_PAWS) &&
1510 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1511 /* Didn't send a timestamp. Timestamps aren't really useful
1512 * when:
1513 * - connection opening or closing (often not even sent).
1514                  *   but we must not let an attacker put a FIN on a
1515 * data packet to sneak it through our ESTABLISHED check.
1516 * - on a TCP reset. RFC suggests not even looking at TS.
1517 * - on an empty ACK. The TS will not be echoed so it will
1518 * probably not help keep the RTT calculation in sync and
1519 * there isn't as much danger when the sequence numbers
1520 * got wrapped. So some stacks don't include TS on empty
1521 * ACKs :-(
1522 *
1523 * To minimize the disruption to mostly RFC1323 conformant
1524 * stacks, we will only require timestamps on data packets.
1525 *
1526 * And what do ya know, we cannot require timestamps on data
1527 * packets. There appear to be devices that do legitimate
1528 * TCP connection hijacking. There are HTTP devices that allow
1529 * a 3whs (with timestamps) and then buffer the HTTP request.
1530 * If the intermediate device has the HTTP response cache, it
1531 * will spoof the response but not bother timestamping its
1532 * packets. So we can look for the presence of a timestamp in
1533 * the first data packet and if there, require it in all future
1534 * packets.
1535 */
1536
1537 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1538 /*
1539 * Hey! Someone tried to sneak a packet in. Or the
1540 * stack changed its RFC1323 behavior?!?!
1541 */
1542 if (pf_status.debug >= LOG_NOTICE) {
1543 log(LOG_NOTICE,
1544 "pf: did not receive expected RFC1323 "
1545 "timestamp");
1546 pf_print_state(state);
1547 pf_print_flags(th->th_flags);
1548 addlog("\n");
1549 }
1550 REASON_SET(reason, PFRES_TS);
1551 return (PF_DROP);
1552 }
1553 }
1554
1555 /*
1556 * We will note if a host sends his data packets with or without
1557 * timestamps. And require all data packets to contain a timestamp
1558 * if the first does. PAWS implicitly requires that all data packets be
1559 * timestamped. But I think there are middle-man devices that hijack
1560 * TCP streams immediately after the 3whs and don't timestamp their
1561 * packets (seen in a WWW accelerator or cache).
1562 */
1563 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1564 (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1565 if (got_ts)
1566 src->scrub->pfss_flags |= PFSS_DATA_TS;
1567 else {
1568 src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1569 if (pf_status.debug >= LOG_NOTICE && dst->scrub &&
1570 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1571 /* Don't warn if other host rejected RFC1323 */
1572 log(LOG_NOTICE,
1573 "pf: broken RFC1323 stack did not "
1574 "timestamp data packet. Disabled PAWS "
1575 "security.");
1576 pf_print_state(state);
1577 pf_print_flags(th->th_flags);
1578 addlog("\n");
1579 }
1580 }
1581 }
1582
1583 /*
1584 * Update PAWS values
1585 */
1586 if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1587 (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1588 getmicrouptime(&src->scrub->pfss_last);
1589 if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1590 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1591 src->scrub->pfss_tsval = tsval;
1592
1593 if (tsecr) {
1594 if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1595 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1596 src->scrub->pfss_tsecr = tsecr;
1597
1598 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1599 (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1600 src->scrub->pfss_tsval0 == 0)) {
1601 /* tsval0 MUST be the lowest timestamp */
1602 src->scrub->pfss_tsval0 = tsval;
1603 }
1604
1605 /* Only fully initialized after a TS gets echoed */
1606 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1607 src->scrub->pfss_flags |= PFSS_PAWS;
1608 }
1609 }
1610
1611 /* I have a dream.... TCP segment reassembly.... */
1612 return (0);
1613 }
1614
1615 int
1616 pf_normalize_mss(struct pf_pdesc *pd, u_int16_t maxmss)
1617 {
1618 int olen, optsoff;
1619 u_int8_t opts[MAX_TCPOPTLEN], *opt;
1620
1621 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
1622 optsoff = pd->off + sizeof(struct tcphdr);
1623 if (olen < TCPOLEN_MAXSEG ||
1624 !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
1625 return (0);
1626
1627 opt = opts;
1628 while ((opt = pf_find_tcpopt(opt, opts, olen,
1629 TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
1630 u_int16_t mss;
1631 u_int8_t *mssp = opt + 2;
1632 memcpy(&mss, mssp, sizeof(mss));
1633 if (ntohs(mss) > maxmss) {
1634 size_t mssoffopts = mssp - opts;
1635 pf_patch_16_unaligned(pd, &mss,
1636 htons(maxmss), PF_ALGNMNT(mssoffopts));
1637 m_copyback(pd->m, optsoff + mssoffopts,
1638 sizeof(mss), &mss, M_NOWAIT);
1639 m_copyback(pd->m, pd->off,
1640 sizeof(struct tcphdr), &pd->hdr.tcp, M_NOWAIT);
1641 }
1642
1643 opt += opt[1];
1644 }
1645
1646 return (0);
1647 }
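/*
 * Usage sketch: with a pf.conf rule such as "match out on egress scrub
 * (max-mss 1440)", a SYN advertising MSS 1460 has the option value
 * patched down to 1440 above, the TCP checksum adjusted through
 * pf_patch_16_unaligned(), and both the option bytes and the TCP header
 * copied back into the mbuf.
 */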
1648
1649 void
1650 pf_scrub(struct mbuf *m, u_int16_t flags, sa_family_t af, u_int8_t min_ttl,
1651 u_int8_t tos)
1652 {
1653 struct ip *h = mtod(m, struct ip *);
1654 #ifdef INET6
1655 struct ip6_hdr *h6 = mtod(m, struct ip6_hdr *);
1656 #endif /* INET6 */
1657 u_int16_t old;
1658
1659 /* Clear IP_DF if no-df was requested */
1660 if (flags & PFSTATE_NODF && af == AF_INET && h->ip_off & htons(IP_DF)) {
1661 old = h->ip_off;
1662 h->ip_off &= htons(~IP_DF);
1663 pf_cksum_fixup(&h->ip_sum, old, h->ip_off, 0);
1664 }
1665
1666 /* Enforce a minimum ttl, may cause endless packet loops */
1667 if (min_ttl && af == AF_INET && h->ip_ttl < min_ttl) {
1668 old = h->ip_ttl;
1669 h->ip_ttl = min_ttl;
1670 pf_cksum_fixup(&h->ip_sum, old, h->ip_ttl, 0);
1671 }
1672 #ifdef INET6
1673 if (min_ttl && af == AF_INET6 && h6->ip6_hlim < min_ttl)
1674 h6->ip6_hlim = min_ttl;
1675 #endif /* INET6 */
1676
1677 /* Enforce tos */
1678 if (flags & PFSTATE_SETTOS) {
1679 if (af == AF_INET) {
1680 /*
1681 * ip_tos is 8 bit field at offset 1. Use 16 bit value
1682 * at offset 0.
1683 */
1684 old = *(u_int16_t *)h;
1685 h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
1686 pf_cksum_fixup(&h->ip_sum, old, *(u_int16_t *)h, 0);
1687 }
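                /*
                 * Example: with an original tos of 0, the first two
                 * header octets change from 0x45 0x00 to 0x45 0x20 when
                 * tos 0x20 is set, and pf_cksum_fixup() folds exactly
                 * that 16 bit difference into ip_sum.
                 */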
1688 #ifdef INET6
1689 if (af == AF_INET6) {
1690 /* drugs are unable to explain such idiocy */
1691 h6->ip6_flow &= ~htonl(0x0fc00000);
1692 h6->ip6_flow |= htonl(((u_int32_t)tos) << 20);
1693 }
1694 #endif /* INET6 */
1695 }
1696
1697 /* random-id, but not for fragments */
1698 if (flags & PFSTATE_RANDOMID && af == AF_INET &&
1699 !(h->ip_off & ~htons(IP_DF))) {
1700 old = h->ip_id;
1701 h->ip_id = htons(ip_randomid());
1702 pf_cksum_fixup(&h->ip_sum, old, h->ip_id, 0);
1703 }
1704 }