sys/netinet6/frag6.c
1 /*-
2 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the project nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "opt_rss.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/hash.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/domain.h>
43 #include <sys/eventhandler.h>
44 #include <sys/protosw.h>
45 #include <sys/socket.h>
46 #include <sys/errno.h>
47 #include <sys/time.h>
48 #include <sys/kernel.h>
49 #include <sys/syslog.h>
50
51 #include <machine/atomic.h>
52
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/netisr.h>
56 #include <net/route.h>
57 #include <net/vnet.h>
58
59 #include <netinet/in.h>
60 #include <netinet/in_var.h>
61 #include <netinet/ip6.h>
62 #include <netinet6/ip6_var.h>
63 #include <netinet/icmp6.h>
64 #include <netinet/in_systm.h> /* for ECN definitions */
65 #include <netinet/ip.h> /* for ECN definitions */
66
67 #include <security/mac/mac_framework.h>
68
69 /*
70 * Reassembly headers are stored in hash buckets.
71 */
72 #define IP6REASS_NHASH_LOG2 10
73 #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2)
74 #define IP6REASS_HMASK (IP6REASS_NHASH - 1)
75
76 static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
77 uint32_t bucket __unused);
78 static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
79 static void frag6_insque_head(struct ip6q *, struct ip6q *,
80 uint32_t bucket);
81 static void frag6_remque(struct ip6q *, uint32_t bucket);
82 static void frag6_freef(struct ip6q *, uint32_t bucket);
83
84 struct ip6qbucket {
85 struct ip6q ip6q;
86 struct mtx lock;
87 int count;
88 };
89
90 static VNET_DEFINE(volatile u_int, frag6_nfragpackets);
91 volatile u_int frag6_nfrags = 0;
92 static VNET_DEFINE(struct ip6qbucket, ip6q[IP6REASS_NHASH]);
93 static VNET_DEFINE(uint32_t, ip6q_hashseed);
94
95 #define V_frag6_nfragpackets VNET(frag6_nfragpackets)
96 #define V_ip6q VNET(ip6q)
97 #define V_ip6q_hashseed VNET(ip6q_hashseed)
98
99 #define IP6Q_LOCK(i) mtx_lock(&V_ip6q[(i)].lock)
100 #define IP6Q_TRYLOCK(i) mtx_trylock(&V_ip6q[(i)].lock)
101 #define IP6Q_LOCK_ASSERT(i) mtx_assert(&V_ip6q[(i)].lock, MA_OWNED)
102 #define IP6Q_UNLOCK(i) mtx_unlock(&V_ip6q[(i)].lock)
103 #define IP6Q_HEAD(i) (&V_ip6q[(i)].ip6q)
104
105 static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
106
107 /*
108 * By default, limit the number of IP6 fragments across all reassembly
109 * queues to 1/32 of the total number of mbuf clusters.
110 *
111 * Limit the total number of reassembly queues per VNET to the
112 * IP6 fragment limit, but ensure the limit will not allow any bucket
113 * to grow above 100 items. (The bucket limit is
114  * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
115 * multiplier to reach a 100-item limit.)
116 * The 100-item limit was chosen as brief testing seems to show that
117 * this produces "reasonable" performance on some subset of systems
118 * under DoS attack.
119 */
120 #define IP6_MAXFRAGS (nmbclusters / 32)
121 #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
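    /*
     * Worked example: with the packet limit capped at IP6REASS_NHASH * 50,
     * frag6_set_bucketsize() below computes a per-bucket limit of
     * (IP6REASS_NHASH * 50) / (IP6REASS_NHASH / 2) = 100 entries.
     */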
122
123 /*
124 * Initialise reassembly queue and fragment identifier.
125 */
126 void
127 frag6_set_bucketsize(void)
128 {
129 int i;
130
131 if ((i = V_ip6_maxfragpackets) > 0)
132 V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
133 }
134
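    /*
     * Event handler for nmbclusters changes: rescale the global fragment
     * limit and the per-VNET packet and bucket limits.
     */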
135 static void
136 frag6_change(void *tag)
137 {
138 VNET_ITERATOR_DECL(vnet_iter);
139
140 ip6_maxfrags = IP6_MAXFRAGS;
141 VNET_LIST_RLOCK_NOSLEEP();
142 VNET_FOREACH(vnet_iter) {
143 CURVNET_SET(vnet_iter);
144 V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
145 frag6_set_bucketsize();
146 CURVNET_RESTORE();
147 }
148 VNET_LIST_RUNLOCK_NOSLEEP();
149 }
150
151 void
152 frag6_init(void)
153 {
154 struct ip6q *q6;
155 int i;
156
157 V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
158 frag6_set_bucketsize();
159 for (i = 0; i < IP6REASS_NHASH; i++) {
160 q6 = IP6Q_HEAD(i);
161 q6->ip6q_next = q6->ip6q_prev = q6;
162 mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF);
163 V_ip6q[i].count = 0;
164 }
165 V_ip6q_hashseed = arc4random();
166 V_ip6_maxfragsperpacket = 64;
167 if (!IS_DEFAULT_VNET(curvnet))
168 return;
169
170 ip6_maxfrags = IP6_MAXFRAGS;
171 EVENTHANDLER_REGISTER(nmbclusters_change,
172 frag6_change, NULL, EVENTHANDLER_PRI_ANY);
173 }
174
175 /*
176  * In RFC 2460, the fragmentation and reassembly rules do not agree with each
177  * other in how the next header field of the fragment header is handled.
178  * While the sender uses the same value for all fragments of a packet,
179  * the receiver is advised not to check them for consistency.
180 *
181 * fragment rule (p20):
182 * (2) A Fragment header containing:
183 * The Next Header value that identifies the first header of
184 * the Fragmentable Part of the original packet.
185 * -> next header field is same for all fragments
186 *
187 * reassembly rule (p21):
188 * The Next Header field of the last header of the Unfragmentable
189 * Part is obtained from the Next Header field of the first
190 * fragment's Fragment header.
191 * -> should grab it from the first fragment only
192 *
193  * The following note also contradicts the fragment rule - no one is going to
194  * send fragments of the same packet with different next header fields.
195 *
196 * additional note (p22):
197 * The Next Header values in the Fragment headers of different
198 * fragments of the same original packet may differ. Only the value
199 * from the Offset zero fragment packet is used for reassembly.
200 * -> should grab it from the first fragment only
201 *
202 * There is no explicit reason given in the RFC. Historical reason maybe?
203 */
204 /*
205 * Fragment input
206 */
207 int
208 frag6_input(struct mbuf **mp, int *offp, int proto)
209 {
210 struct mbuf *m = *mp, *t;
211 struct ip6_hdr *ip6;
212 struct ip6_frag *ip6f;
213 struct ip6q *head, *q6;
214 struct ip6asfrag *af6, *ip6af, *af6dwn;
215 struct in6_ifaddr *ia;
216 int offset = *offp, nxt, i, next;
217 int first_frag = 0;
218 int fragoff, frgpartlen; /* must be larger than u_int16_t */
219 uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
220 sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
221 uint32_t hash, *hashkeyp;
222 struct ifnet *dstifp;
223 u_int8_t ecn, ecn0;
224 #ifdef RSS
225 struct m_tag *mtag;
226 struct ip6_direct_ctx *ip6dc;
227 #endif
228
229 #if 0
230 char ip6buf[INET6_ADDRSTRLEN];
231 #endif
232
233 ip6 = mtod(m, struct ip6_hdr *);
234 #ifndef PULLDOWN_TEST
235 IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
236 ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
237 #else
238 IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
239 if (ip6f == NULL)
240 return (IPPROTO_DONE);
241 #endif
242
243 dstifp = NULL;
244 /* find the destination interface of the packet. */
245 ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
246 if (ia != NULL) {
247 dstifp = ia->ia_ifp;
248 ifa_free(&ia->ia_ifa);
249 }
250 /* jumbo payload can't contain a fragment header */
251 if (ip6->ip6_plen == 0) {
252 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
253 in6_ifstat_inc(dstifp, ifs6_reass_fail);
254 return IPPROTO_DONE;
255 }
256
257 /*
258  * Check whether the fragment's payload length is a multiple of
259  * 8 octets, as required for every fragment except the last one.
260  * sizeof(struct ip6_frag) == 8
261  * sizeof(struct ip6_hdr) == 40
262 */
263 if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
264 (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
265 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
266 offsetof(struct ip6_hdr, ip6_plen));
267 in6_ifstat_inc(dstifp, ifs6_reass_fail);
268 return IPPROTO_DONE;
269 }
270
271 IP6STAT_INC(ip6s_fragments);
272 in6_ifstat_inc(dstifp, ifs6_reass_reqd);
273
274 /* offset now points to data portion */
275 offset += sizeof(struct ip6_frag);
276
277 /*
278 * RFC 6946: Handle "atomic" fragments (offset and m bit set to 0)
279 * upfront, unrelated to any reassembly. Just skip the fragment header.
280 */
281 if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
282 /* XXX-BZ we want dedicated counters for this. */
283 IP6STAT_INC(ip6s_reassembled);
284 in6_ifstat_inc(dstifp, ifs6_reass_ok);
285 *offp = offset;
286 m->m_flags |= M_FRAGMENTED;
287 return (ip6f->ip6f_nxt);
288 }
289
290 /* Get fragment length and discard 0-byte fragments. */
291 frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
292 if (frgpartlen == 0) {
293 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
294 offsetof(struct ip6_hdr, ip6_plen));
295 in6_ifstat_inc(dstifp, ifs6_reass_fail);
296 IP6STAT_INC(ip6s_fragdropped);
297 return IPPROTO_DONE;
298 }
299
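	/*
	 * Build the reassembly hash key from the source address, the
	 * destination address and the fragment identification, then
	 * hash it with the per-VNET seed to select a bucket.
	 */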
300 hashkeyp = hashkey;
301 memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
302 hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
303 memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
304 hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
305 *hashkeyp = ip6f->ip6f_ident;
306 hash = jenkins_hash32(hashkey, nitems(hashkey), V_ip6q_hashseed);
307 hash &= IP6REASS_HMASK;
308 head = IP6Q_HEAD(hash);
309 IP6Q_LOCK(hash);
310
311 /*
312 * Enforce upper bound on number of fragments.
313 * If maxfrag is 0, never accept fragments.
314 * If maxfrag is -1, accept all fragments without limitation.
315 */
316 if (ip6_maxfrags < 0)
317 ;
318 else if (frag6_nfrags >= (u_int)ip6_maxfrags)
319 goto dropfrag;
320
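	/*
	 * Look for an existing reassembly queue matching this fragment's
	 * source, destination and identification (and MAC label, if MAC
	 * is compiled in).
	 */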
321 for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next)
322 if (ip6f->ip6f_ident == q6->ip6q_ident &&
323 IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
324 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
325 #ifdef MAC
326 && mac_ip6q_match(m, q6)
327 #endif
328 )
329 break;
330
331 if (q6 == head) {
332 /*
333 		 * This is the first fragment to arrive; create a reassembly queue.
334 */
335 first_frag = 1;
336
337 /*
338 * Enforce upper bound on number of fragmented packets
339 * for which we attempt reassembly;
340 * If maxfragpackets is 0, never accept fragments.
341 * If maxfragpackets is -1, accept all fragments without
342 * limitation.
343 */
344 if (V_ip6_maxfragpackets < 0)
345 ;
346 else if (V_ip6q[hash].count >= V_ip6_maxfragbucketsize ||
347 V_frag6_nfragpackets >= (u_int)V_ip6_maxfragpackets)
348 goto dropfrag;
349 atomic_add_int(&V_frag6_nfragpackets, 1);
350 q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
351 M_NOWAIT);
352 if (q6 == NULL)
353 goto dropfrag;
354 bzero(q6, sizeof(*q6));
355 #ifdef MAC
356 if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
357 free(q6, M_FTABLE);
358 goto dropfrag;
359 }
360 mac_ip6q_create(m, q6);
361 #endif
362 frag6_insque_head(q6, head, hash);
363
364 /* ip6q_nxt will be filled afterwards, from 1st fragment */
365 q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
366 #ifdef notyet
367 q6->ip6q_nxtp = (u_char *)nxtp;
368 #endif
369 q6->ip6q_ident = ip6f->ip6f_ident;
370 q6->ip6q_ttl = IPV6_FRAGTTL;
371 q6->ip6q_src = ip6->ip6_src;
372 q6->ip6q_dst = ip6->ip6_dst;
373 q6->ip6q_ecn =
374 (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
375 q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
376
377 q6->ip6q_nfrag = 0;
378 }
379
380 /*
381 * If it's the 1st fragment, record the length of the
382 * unfragmentable part and the next header of the fragment header.
383 */
384 fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
385 if (fragoff == 0) {
386 q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
387 sizeof(struct ip6_frag);
388 q6->ip6q_nxt = ip6f->ip6f_nxt;
389 }
390
391 /*
392 * Check that the reassembled packet would not exceed 65535 bytes
393 * in size.
394 * If it would exceed, discard the fragment and return an ICMP error.
395 */
396 if (q6->ip6q_unfrglen >= 0) {
397 /* The 1st fragment has already arrived. */
398 if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
399 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
400 offset - sizeof(struct ip6_frag) +
401 offsetof(struct ip6_frag, ip6f_offlg));
402 IP6Q_UNLOCK(hash);
403 return (IPPROTO_DONE);
404 }
405 } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
406 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
407 offset - sizeof(struct ip6_frag) +
408 offsetof(struct ip6_frag, ip6f_offlg));
409 IP6Q_UNLOCK(hash);
410 return (IPPROTO_DONE);
411 }
412 /*
413 * If it's the first fragment, do the above check for each
414 * fragment already stored in the reassembly queue.
415 */
416 if (fragoff == 0) {
417 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
418 af6 = af6dwn) {
419 af6dwn = af6->ip6af_down;
420
421 if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
422 IPV6_MAXPACKET) {
423 struct mbuf *merr = IP6_REASS_MBUF(af6);
424 struct ip6_hdr *ip6err;
425 int erroff = af6->ip6af_offset;
426
427 /* dequeue the fragment. */
428 frag6_deq(af6, hash);
429 free(af6, M_FTABLE);
430
431 /* adjust pointer. */
432 ip6err = mtod(merr, struct ip6_hdr *);
433
434 /*
435 * Restore source and destination addresses
436 * in the erroneous IPv6 header.
437 */
438 ip6err->ip6_src = q6->ip6q_src;
439 ip6err->ip6_dst = q6->ip6q_dst;
440
441 icmp6_error(merr, ICMP6_PARAM_PROB,
442 ICMP6_PARAMPROB_HEADER,
443 erroff - sizeof(struct ip6_frag) +
444 offsetof(struct ip6_frag, ip6f_offlg));
445 }
446 }
447 }
448
449 ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
450 M_NOWAIT);
451 if (ip6af == NULL)
452 goto dropfrag;
453 bzero(ip6af, sizeof(*ip6af));
454 ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
455 ip6af->ip6af_off = fragoff;
456 ip6af->ip6af_frglen = frgpartlen;
457 ip6af->ip6af_offset = offset;
458 IP6_REASS_MBUF(ip6af) = m;
459
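	/*
	 * A freshly created queue is empty, so the ECN consistency and
	 * overlap checks below do not apply; insert the fragment directly.
	 */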
460 if (first_frag) {
461 af6 = (struct ip6asfrag *)q6;
462 goto insert;
463 }
464
465 /*
466 * Handle ECN by comparing this segment with the first one;
467 * if CE is set, do not lose CE.
468 	 * Drop if CE and not-ECT are mixed for the same packet.
469 */
470 ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
471 ecn0 = q6->ip6q_ecn;
472 if (ecn == IPTOS_ECN_CE) {
473 if (ecn0 == IPTOS_ECN_NOTECT) {
474 free(ip6af, M_FTABLE);
475 goto dropfrag;
476 }
477 if (ecn0 != IPTOS_ECN_CE)
478 q6->ip6q_ecn = IPTOS_ECN_CE;
479 }
480 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
481 free(ip6af, M_FTABLE);
482 goto dropfrag;
483 }
484
485 /*
486 * Find a segment which begins after this one does.
487 */
488 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
489 af6 = af6->ip6af_down)
490 if (af6->ip6af_off > ip6af->ip6af_off)
491 break;
492
493 #if 0
494 /*
495 * If there is a preceding segment, it may provide some of
496 * our data already. If so, drop the data from the incoming
497 * segment. If it provides all of our data, drop us.
498 */
499 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
500 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
501 - ip6af->ip6af_off;
502 if (i > 0) {
503 if (i >= ip6af->ip6af_frglen)
504 goto dropfrag;
505 m_adj(IP6_REASS_MBUF(ip6af), i);
506 ip6af->ip6af_off += i;
507 ip6af->ip6af_frglen -= i;
508 }
509 }
510
511 /*
512 * While we overlap succeeding segments trim them or,
513 * if they are completely covered, dequeue them.
514 */
515 while (af6 != (struct ip6asfrag *)q6 &&
516 ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
517 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
518 if (i < af6->ip6af_frglen) {
519 af6->ip6af_frglen -= i;
520 af6->ip6af_off += i;
521 m_adj(IP6_REASS_MBUF(af6), i);
522 break;
523 }
524 af6 = af6->ip6af_down;
525 m_freem(IP6_REASS_MBUF(af6->ip6af_up));
526 frag6_deq(af6->ip6af_up, hash);
527 }
528 #else
529 /*
530 	 * If the incoming fragment overlaps some existing fragments in
531 	 * the reassembly queue, drop it, since it is dangerous to override
532 	 * existing fragments from a security point of view.
533 	 * We don't know which fragment is the bad guy - here we trust the
534 	 * fragment that arrived earlier, with no real reason.
535 *
536 * Note: due to changes after disabling this part, mbuf passed to
537 * m_adj() below now does not meet the requirement.
538 */
539 if (af6->ip6af_up != (struct ip6asfrag *)q6) {
540 i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
541 - ip6af->ip6af_off;
542 if (i > 0) {
543 #if 0 /* suppress the noisy log */
544 log(LOG_ERR, "%d bytes of a fragment from %s "
545 "overlaps the previous fragment\n",
546 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
547 #endif
548 free(ip6af, M_FTABLE);
549 goto dropfrag;
550 }
551 }
552 if (af6 != (struct ip6asfrag *)q6) {
553 i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
554 if (i > 0) {
555 #if 0 /* suppress the noisy log */
556 log(LOG_ERR, "%d bytes of a fragment from %s "
557 "overlaps the succeeding fragment",
558 i, ip6_sprintf(ip6buf, &q6->ip6q_src));
559 #endif
560 free(ip6af, M_FTABLE);
561 goto dropfrag;
562 }
563 }
564 #endif
565
566 insert:
567 #ifdef MAC
568 if (!first_frag)
569 mac_ip6q_update(m, q6);
570 #endif
571
572 /*
573 * Stick new segment in its place;
574 * check for complete reassembly.
575 * If not complete, check fragment limit.
576 * Move to front of packet queue, as we are
577 * the most recently active fragmented packet.
578 */
579 frag6_enq(ip6af, af6->ip6af_up, hash);
580 atomic_add_int(&frag6_nfrags, 1);
581 q6->ip6q_nfrag++;
582 #if 0 /* xxx */
583 if (q6 != head->ip6q_next) {
584 frag6_remque(q6, hash);
585 frag6_insque_head(q6, head, hash);
586 }
587 #endif
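	/*
	 * Walk the fragment list and check whether the offsets are
	 * contiguous from 0 and the final fragment has arrived; if not,
	 * reassembly is not yet complete.
	 */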
588 next = 0;
589 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
590 af6 = af6->ip6af_down) {
591 if (af6->ip6af_off != next) {
592 if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
593 IP6STAT_INC(ip6s_fragdropped);
594 frag6_freef(q6, hash);
595 }
596 IP6Q_UNLOCK(hash);
597 return IPPROTO_DONE;
598 }
599 next += af6->ip6af_frglen;
600 }
601 if (af6->ip6af_up->ip6af_mff) {
602 if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
603 IP6STAT_INC(ip6s_fragdropped);
604 frag6_freef(q6, hash);
605 }
606 IP6Q_UNLOCK(hash);
607 return IPPROTO_DONE;
608 }
609
610 /*
611 * Reassembly is complete; concatenate fragments.
612 */
613 ip6af = q6->ip6q_down;
614 t = m = IP6_REASS_MBUF(ip6af);
615 af6 = ip6af->ip6af_down;
616 frag6_deq(ip6af, hash);
617 while (af6 != (struct ip6asfrag *)q6) {
618 m->m_pkthdr.csum_flags &=
619 IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags;
620 m->m_pkthdr.csum_data +=
621 IP6_REASS_MBUF(af6)->m_pkthdr.csum_data;
622
623 af6dwn = af6->ip6af_down;
624 frag6_deq(af6, hash);
625 while (t->m_next)
626 t = t->m_next;
627 m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
628 m_demote_pkthdr(IP6_REASS_MBUF(af6));
629 m_cat(t, IP6_REASS_MBUF(af6));
630 free(af6, M_FTABLE);
631 af6 = af6dwn;
632 }
633
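	/* Fold the accumulated checksum value down to 16 bits. */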
634 while (m->m_pkthdr.csum_data & 0xffff0000)
635 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
636 (m->m_pkthdr.csum_data >> 16);
637
638 /* adjust offset to point where the original next header starts */
639 offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
640 free(ip6af, M_FTABLE);
641 ip6 = mtod(m, struct ip6_hdr *);
642 ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
643 if (q6->ip6q_ecn == IPTOS_ECN_CE)
644 ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
645 nxt = q6->ip6q_nxt;
646 #ifdef notyet
647 *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
648 #endif
649
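	/*
	 * Strip the fragment header.  If that fails, tear down the
	 * reassembly queue and drop the packet.
	 */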
650 if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
651 frag6_remque(q6, hash);
652 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
653 #ifdef MAC
654 mac_ip6q_destroy(q6);
655 #endif
656 free(q6, M_FTABLE);
657 atomic_subtract_int(&V_frag6_nfragpackets, 1);
658
659 goto dropfrag;
660 }
661
662 /*
663 	 * Store NXT back into the next header field of the preceding header.
664 */
665 m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
666 (caddr_t)&nxt);
667
668 frag6_remque(q6, hash);
669 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
670 #ifdef MAC
671 mac_ip6q_reassemble(q6, m);
672 mac_ip6q_destroy(q6);
673 #endif
674 free(q6, M_FTABLE);
675 atomic_subtract_int(&V_frag6_nfragpackets, 1);
676
677 if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
678 int plen = 0;
679 for (t = m; t; t = t->m_next)
680 plen += t->m_len;
681 m->m_pkthdr.len = plen;
682 }
683
684 #ifdef RSS
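	/*
	 * Record the next header value and offset in an mbuf tag so that
	 * the direct netisr dispatch below can resume processing at the
	 * correct header.
	 */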
685 mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
686 M_NOWAIT);
687 if (mtag == NULL)
688 goto dropfrag;
689
690 ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
691 ip6dc->ip6dc_nxt = nxt;
692 ip6dc->ip6dc_off = offset;
693
694 m_tag_prepend(m, mtag);
695 #endif
696
697 IP6Q_UNLOCK(hash);
698 IP6STAT_INC(ip6s_reassembled);
699 in6_ifstat_inc(dstifp, ifs6_reass_ok);
700
701 #ifdef RSS
702 /*
703 * Queue/dispatch for reprocessing.
704 */
705 netisr_dispatch(NETISR_IPV6_DIRECT, m);
706 return IPPROTO_DONE;
707 #endif
708
709 /*
710 * Tell launch routine the next header
711 */
712
713 *mp = m;
714 *offp = offset;
715
716 return nxt;
717
718 dropfrag:
719 IP6Q_UNLOCK(hash);
720 in6_ifstat_inc(dstifp, ifs6_reass_fail);
721 IP6STAT_INC(ip6s_fragdropped);
722 m_freem(m);
723 return IPPROTO_DONE;
724 }
725
726 /*
727 * Free a fragment reassembly header and all
728 * associated datagrams.
729 */
730 static void
731 frag6_freef(struct ip6q *q6, uint32_t bucket)
732 {
733 struct ip6asfrag *af6, *down6;
734
735 IP6Q_LOCK_ASSERT(bucket);
736
737 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
738 af6 = down6) {
739 struct mbuf *m = IP6_REASS_MBUF(af6);
740
741 down6 = af6->ip6af_down;
742 frag6_deq(af6, bucket);
743
744 /*
745 * Return ICMP time exceeded error for the 1st fragment.
746 * Just free other fragments.
747 */
748 if (af6->ip6af_off == 0) {
749 struct ip6_hdr *ip6;
750
751 /* adjust pointer */
752 ip6 = mtod(m, struct ip6_hdr *);
753
754 /* restore source and destination addresses */
755 ip6->ip6_src = q6->ip6q_src;
756 ip6->ip6_dst = q6->ip6q_dst;
757
758 icmp6_error(m, ICMP6_TIME_EXCEEDED,
759 ICMP6_TIME_EXCEED_REASSEMBLY, 0);
760 } else
761 m_freem(m);
762 free(af6, M_FTABLE);
763 }
764 frag6_remque(q6, bucket);
765 atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
766 #ifdef MAC
767 mac_ip6q_destroy(q6);
768 #endif
769 free(q6, M_FTABLE);
770 atomic_subtract_int(&V_frag6_nfragpackets, 1);
771 }
772
773 /*
774  * Put an IPv6 fragment on a reassembly chain.
775 * Like insque, but pointers in middle of structure.
776 */
777 static void
778 frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
779 uint32_t bucket __unused)
780 {
781
782 IP6Q_LOCK_ASSERT(bucket);
783
784 af6->ip6af_up = up6;
785 af6->ip6af_down = up6->ip6af_down;
786 up6->ip6af_down->ip6af_up = af6;
787 up6->ip6af_down = af6;
788 }
789
790 /*
791 * To frag6_enq as remque is to insque.
792 */
793 static void
794 frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
795 {
796
797 IP6Q_LOCK_ASSERT(bucket);
798
799 af6->ip6af_up->ip6af_down = af6->ip6af_down;
800 af6->ip6af_down->ip6af_up = af6->ip6af_up;
801 }
802
803 static void
804 frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
805 {
806
807 IP6Q_LOCK_ASSERT(bucket);
808 KASSERT(IP6Q_HEAD(bucket) == old,
809 ("%s: attempt to insert at head of wrong bucket"
810 " (bucket=%u, old=%p)", __func__, bucket, old));
811
812 new->ip6q_prev = old;
813 new->ip6q_next = old->ip6q_next;
814 	old->ip6q_next->ip6q_prev = new;
815 old->ip6q_next = new;
816 V_ip6q[bucket].count++;
817 }
818
819 static void
820 frag6_remque(struct ip6q *p6, uint32_t bucket)
821 {
822
823 IP6Q_LOCK_ASSERT(bucket);
824
825 p6->ip6q_prev->ip6q_next = p6->ip6q_next;
826 p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
827 V_ip6q[bucket].count--;
828 }
829
830 /*
831  * IPv6 reassembly timer processing;
832 * if a timer expires on a reassembly
833 * queue, discard it.
834 */
835 void
836 frag6_slowtimo(void)
837 {
838 VNET_ITERATOR_DECL(vnet_iter);
839 struct ip6q *head, *q6;
840 int i;
841
842 VNET_LIST_RLOCK_NOSLEEP();
843 VNET_FOREACH(vnet_iter) {
844 CURVNET_SET(vnet_iter);
845 for (i = 0; i < IP6REASS_NHASH; i++) {
846 IP6Q_LOCK(i);
847 head = IP6Q_HEAD(i);
848 q6 = head->ip6q_next;
849 if (q6 == NULL) {
850 /*
851 * XXXJTL: This should never happen. This
852 * should turn into an assertion.
853 */
854 IP6Q_UNLOCK(i);
855 continue;
856 }
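			/*
			 * Decrement the TTL of every queue in this bucket
			 * and free those that have expired.
			 */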
857 while (q6 != head) {
858 --q6->ip6q_ttl;
859 q6 = q6->ip6q_next;
860 if (q6->ip6q_prev->ip6q_ttl == 0) {
861 IP6STAT_INC(ip6s_fragtimeout);
862 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
863 frag6_freef(q6->ip6q_prev, i);
864 }
865 }
866 /*
867 * If we are over the maximum number of fragments
868 * (due to the limit being lowered), drain off
869 * enough to get down to the new limit.
870 * Note that we drain all reassembly queues if
871 * maxfragpackets is 0 (fragmentation is disabled),
872 * and don't enforce a limit when maxfragpackets
873 * is negative.
874 */
875 while ((V_ip6_maxfragpackets == 0 ||
876 (V_ip6_maxfragpackets > 0 &&
877 V_ip6q[i].count > V_ip6_maxfragbucketsize)) &&
878 head->ip6q_prev != head) {
879 IP6STAT_INC(ip6s_fragoverflow);
880 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
881 frag6_freef(head->ip6q_prev, i);
882 }
883 IP6Q_UNLOCK(i);
884 }
885 /*
886 * If we are still over the maximum number of fragmented
887 * packets, drain off enough to get down to the new limit.
888 */
889 i = 0;
890 while (V_ip6_maxfragpackets >= 0 &&
891 V_frag6_nfragpackets > (u_int)V_ip6_maxfragpackets) {
892 IP6Q_LOCK(i);
893 head = IP6Q_HEAD(i);
894 if (head->ip6q_prev != head) {
895 IP6STAT_INC(ip6s_fragoverflow);
896 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
897 frag6_freef(head->ip6q_prev, i);
898 }
899 IP6Q_UNLOCK(i);
900 i = (i + 1) % IP6REASS_NHASH;
901 }
902 CURVNET_RESTORE();
903 }
904 VNET_LIST_RUNLOCK_NOSLEEP();
905 }
906
907 /*
908 * Drain off all datagram fragments.
909 */
910 void
911 frag6_drain(void)
912 {
913 VNET_ITERATOR_DECL(vnet_iter);
914 struct ip6q *head;
915 int i;
916
917 VNET_LIST_RLOCK_NOSLEEP();
918 VNET_FOREACH(vnet_iter) {
919 CURVNET_SET(vnet_iter);
920 for (i = 0; i < IP6REASS_NHASH; i++) {
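			/* Skip buckets whose lock is currently held. */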
921 if (IP6Q_TRYLOCK(i) == 0)
922 continue;
923 head = IP6Q_HEAD(i);
924 while (head->ip6q_next != head) {
925 IP6STAT_INC(ip6s_fragdropped);
926 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
927 frag6_freef(head->ip6q_next, i);
928 }
929 IP6Q_UNLOCK(i);
930 }
931 CURVNET_RESTORE();
932 }
933 VNET_LIST_RUNLOCK_NOSLEEP();
934 }
935
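/*
 * Remove the fragment header from an mbuf chain, either by sliding the
 * preceding headers forward or by splitting the chain and trimming the
 * header from the second half.
 */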
936 int
937 ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
938 {
939 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
940 struct mbuf *t;
941
942 /* Delete frag6 header. */
943 if (m->m_len >= offset + sizeof(struct ip6_frag)) {
944 /* This is the only possible case with !PULLDOWN_TEST. */
945 bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
946 offset);
947 m->m_data += sizeof(struct ip6_frag);
948 m->m_len -= sizeof(struct ip6_frag);
949 } else {
950 		/* This requires no data copy if the split boundary falls within a cluster. */
951 if ((t = m_split(m, offset, wait)) == NULL)
952 return (ENOMEM);
953 m_adj(t, sizeof(struct ip6_frag));
954 m_cat(m, t);
955 }
956
957 m->m_flags |= M_FRAGMENTED;
958 return (0);
959 }