/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int	pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_krule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent	*pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint32_t, uint8_t, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();
	KASSERT(frag, ("frag != NULL"));

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
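/*
 * Worked example of the rules above: an empty queue conceptually has one
 * hole covering the whole packet.  A fragment dropped into the middle
 * splits a hole in two (+1).  If it directly abuts its predecessor it
 * closes the gap on its left (-1), and if it abuts its successor it
 * closes the gap on its right (-1).  A first fragment (offset 0) or a
 * last fragment (no more-fragments flag) likewise closes the hole at the
 * respective packet boundary.  Reassembly is complete once fr_holes
 * reaches zero.
 */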
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
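	/*
	 * Worked example: with PF_FRAG_ENTRY_POINTS == 16, each bucket
	 * covers 0x10000 / 16 == 4096 octets of offset space.  A fragment
	 * at offset 4000 maps to index 0, one at offset 4096 to index 1,
	 * and the largest 8 byte aligned offset (0xffff & ~7 == 65528)
	 * to index 15, as the compile time assertions below verify.
	 */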
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spans 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
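	/*
	 * Example: with PF_FRAG_ENTRY_LIMIT == 64 and 16 entry points, a
	 * flood of minimal 8 octet fragments can enqueue at most
	 * 16 * 64 == 1024 entries per packet, instead of the 8192 pieces
	 * a full sized packet could otherwise be split into.
	 */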
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
430 ("prev == NULL || pf_frent_index(pref) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
435 ("prev == NULL || pf_frent_index(pref) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frent, take the final one.  Assume
	 * that the fragment queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frent, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frent, this will
	 * succeed.
	 */
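	/*
	 * Example: if frent maps to entry point 3 and buckets 3 and 4 are
	 * still empty while bucket 5 holds a fragment, the loop below stops
	 * at bucket 5.  That fragment lies after frent, so the code further
	 * down steps back by one list element to reach the predecessor.
	 */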
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frent is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;
	int			 old_index, new_index;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			old_index = pf_frent_index(after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			new_index = pf_frent_index(after);
			if (old_index != new_index) {
				DPFPRINTF(("frag index %d, new %d\n",
				    old_index, new_index));
				/* Fragment switched queue as fe_off changed */
				after->fe_off -= aftercut;
				after->fe_len += aftercut;
				/* Remove restored fragment from old queue */
				pf_frent_remove(frag, after);
				after->fe_off += aftercut;
				after->fe_len -= aftercut;
				/* Insert into correct queue */
				if (pf_frent_insert(frag, after, prev)) {
					DPFPRINTF(
					    ("fragment requeue limit exceeded\n"));
					m_freem(after->fe_m);
					uma_zfree(V_pf_frent_z, after);
					/* There is no way to recover */
					goto bad_fragment;
				}
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf		*m, *m2;
	struct pf_frent		*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
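	/*
	 * A negative length makes m_adj() trim from the tail of the chain,
	 * so this cuts the first mbuf down to exactly the IP header plus
	 * the first fragment's payload.
	 */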
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

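	/*
	 * Rewrite the header in place.  pf_cksum_fixup() updates the IP
	 * checksum incrementally (RFC 1624 style) for each 16 bit word
	 * that changes, so the header does not have to be checksummed
	 * from scratch after patching ip_len and the fragment bits.
	 */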
	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

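	/*
	 * Remember the reassembly parameters in an mbuf tag, so that
	 * pf_refragment6() can later split the packet back into fragments
	 * of at most ft_maxlen payload octets carrying the original
	 * fragment id.
	 */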
	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif	/* INET6 */

#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_krule		*r;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;

	PF_RULES_RASSERT();

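	/*
	 * Scan the scrub ruleset for the first matching rule.  The
	 * precomputed r->skip[] pointers let a mismatch on one criterion,
	 * e.g. the interface or address family, jump over all consecutive
	 * rules that share the same value instead of testing each rule
	 * in turn.
	 */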
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);

	pf_counter_u64_critical_enter();
	pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
	pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
	pf_counter_u64_critical_exit();

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if (!(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, h, dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		m = *m0;
		if (m == NULL)
			return (PF_DROP);

		h = mtod(m, struct ip *);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off,
			    h->ip_off, 0);
		}
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_krule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_frag		 frag;
	u_int32_t		 plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);

	pf_counter_u64_critical_enter();
	pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
	pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
	pf_counter_u64_critical_exit();

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	plen = ntohs(h->ip6_plen);
	/* jumbo payload option not supported */
	if (plen == 0)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				if (opt.ip6o_type == IP6OPT_JUMBO)
					goto drop;
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

fragment:
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif	/* INET6 */

int
pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);

	pf_counter_u64_critical_enter();
	pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
	pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
	pf_counter_u64_critical_exit();

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t	tsval, tsecr;
	u_int8_t	hdr[60];
	u_int8_t	*opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	uptime;
	u_int32_t	tsval, tsecr;
	u_int		tsval_from_last;
	u_int8_t	hdr[60];
	u_int8_t	*opt;
	int		copyback = 0;
	int		got_ts = 0;
	size_t		startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoints timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tescr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tescr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval	delta_ts;
		int		ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
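		/*
		 * Worked example: with a ts_fudge of 30 seconds and a
		 * packet arriving 2.5 seconds after its predecessor, the
		 * peer's clock may have advanced by at most
		 * (2 + 30) * 1100 + 500000 / (1000000 / 1100) == 35750
		 * ticks since pfss_last was taken.
		 */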

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets be
	 * timestamped.  But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

static int
pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;
	size_t		 startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

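	/*
	 * Walk the TCP options and clamp an MSS option that exceeds the
	 * rule's max-mss value, e.g. to keep encapsulated flows below a
	 * tunnel MTU.  The TCP checksum is patched incrementally along
	 * with the option bytes.
	 */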
	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				pf_patch_16_unaligned(m,
				    &th->th_sum,
				    mss, htons(r->max_mss),
				    PF_ALGNMNT(startoff),
				    0);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf	*m = *m0;
	struct ip	*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
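	/*
	 * ip_tos is a single octet with no 16 bit alignment of its own,
	 * so the checksum fixup works on the first 16 bit word of the
	 * header (version, header length and TOS), captured before and
	 * after the change.
	 */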
2014 if (flags & PFRULE_SET_TOS) {
2015 u_int16_t ov, nv;
2016
2017 ov = *(u_int16_t *)h;
2018 h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
2019 nv = *(u_int16_t *)h;
2020
2021 h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2022 }
2023
2024 /* random-id, but not for fragments */
2025 if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2026 uint16_t ip_id = h->ip_id;
2027
2028 ip_fillid(h);
2029 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2030 }
2031 }
2032 #endif /* INET */
2033
2034 #ifdef INET6
2035 static void
2036 pf_scrub_ip6(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
2037 {
2038 struct mbuf *m = *m0;
2039 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
2040
2041 /* Enforce a minimum ttl, may cause endless packet loops */
2042 if (min_ttl && h->ip6_hlim < min_ttl)
2043 h->ip6_hlim = min_ttl;
2044
2045 /* Enforce tos. Set traffic class bits */
2046 if (flags & PFRULE_SET_TOS) {
2047 h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
2048 h->ip6_flow |= htonl((tos | IPV6_ECN(h)) << 20);
2049 }
2050 }
2051 #endif