/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)
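/*
 * With IPREASS_NHASH_LOG2 == 10 this gives 1024 buckets;
 * IPREASS_HMASK folds a 32-bit hash value into a bucket index.
 */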

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)
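/*
 * Each bucket has its own mutex, so reassembly of unrelated datagrams
 * can proceed in parallel; only flows hashing to the same bucket
 * contend with each other.
 */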

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen because brief testing showed that it
 * produces "reasonable" performance on some subset of systems under
 * DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))
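/*
 * Worked example (values are illustrative; nmbclusters is a
 * machine-dependent tunable): with nmbclusters == 32768, IP_MAXFRAGS
 * is 1024 and, since IPREASS_NHASH * 50 == 51200, IP_MAXFRAGPACKETS
 * is also 1024, which yields a per-bucket limit of
 * imax(1024 / 512, 1) == 2 in ipreass_init() below.
 */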

static int		maxfrags;
static u_int __exclusive_cache_line	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    &nfrags, 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
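/*
 * M_IP_FRAG aliases the protocol-specific mbuf flag M_PROTO9 and marks
 * a fragment that arrived with IP_MF set, i.e. one that is not the
 * last fragment of its datagram.
 */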
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ifnet *srcifp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled, or maxfragsperpacket is 0, never
	 * accept fragments.  Also drop the packet if accepting it would
	 * exceed the global limit on the number of fragments; a
	 * negative maxfrags disables that limit.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header; convert the offset
	 * of this fragment to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Store receive network interface pointer for later.
	 */
	srcifp = m->m_pkthdr.rcvif;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

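	/*
	 * The flow key is (src, dst, protocol, id).  Keying the Jenkins
	 * hash with a per-VNET random seed keeps a remote sender from
	 * crafting datagrams that all land in a single bucket.
	 */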
	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
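		/*
		 * ipq_maxoff is -1 until the last fragment has been
		 * seen; after that it holds the byte offset just past
		 * the end of the complete datagram.
		 */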
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}
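	/*
	 * Note the asymmetric overlap policy: data already queued ahead
	 * of this fragment wins (the new fragment was trimmed above),
	 * while this fragment's data wins over queued fragments that
	 * start after it (they were trimmed or dropped here).
	 */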

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster, we do 'end-around carry'
	 * here (and not in the for loop above), though it implies we
	 * are not going to reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
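	/*
	 * Example: a summed csum_data of 0x1fffe folds to
	 * 0xfffe + 0x1 == 0xffff in a single pass; for any sum below
	 * 2^32 the loop terminates after at most two iterations.
	 */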
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) {	/* XXX this should be done elsewhere */
		m_fixhdr(m);
		/* set valid receive interface pointer */
		m->m_pkthdr.rcvif = srcifp;
	}
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line. */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
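		/*
		 * MTX_DUPOK is needed because ipq_reuse() may hold two
		 * bucket locks at once and every bucket shares the
		 * same lock name.
		 */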
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	if (atomic_load_int(&nfrags) == 0)
		return;

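	/*
	 * ipq_ttl is decremented once per call; with the slow timeout
	 * traditionally firing twice a second and IPFRAGTTL at 60
	 * ticks, an incomplete datagram survives roughly 30 seconds.
	 */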
	for (int i = 0; i < IPREASS_NHASH; i++) {
		if (TAILQ_EMPTY(&V_ipq[i].head))
			continue;
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ipq *fp, *temp;
	struct mbuf *m;
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);

	/*
	 * Skip processing if IPv4 reassembly is not initialised or
	 * has been torn down by ipreass_destroy().
	 */
	if (V_ipq_zone == NULL) {
		CURVNET_RESTORE();
		return;
	}

	for (i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		/* Scan fragment list. */
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
			for (m = fp->ipq_frags; m != NULL; m = m->m_nextpkt) {
				/* Clear the no longer valid rcvif pointer. */
				if (m->m_pkthdr.rcvif == ifp)
					m->m_pkthdr.rcvif = NULL;
			}
		}
		IPQ_UNLOCK(i);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, ipreass_cleanup, NULL, 0);

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	V_ipq_zone = NULL;
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused, trying
 * first the currently locked hash bucket.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

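	/*
	 * Start with our own bucket, which we already hold locked.
	 * For any other bucket use a trylock only, so that two threads
	 * reclaiming from each other's buckets cannot deadlock.
	 */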
	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}