/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

static VNET_DEFINE(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
static VNET_DEFINE(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)		mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)		mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)		mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

static VNET_DEFINE(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

static VNET_DEFINE(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, noreass);
#define	V_noreass	VNET(noreass)

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in the
 * middle, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and a pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled, or maxfragsperpacket is 0, never
	 * accept fragments.  Also drop the packet if accepting it would
	 * exceed the global limit on the number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && nfrags >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header and convert the
	 * offset of this fragment to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
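	/*
	 * Note that the left shift below also pushes the IP_MF/IP_DF
	 * flag bits out of the 16-bit field, which is why IP_MF was
	 * latched into m_flags as M_IP_FRAG just above.
	 */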
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

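	/*
	 * Reassembly queues are keyed on (src, dst, protocol, id),
	 * packed into three 32-bit words and mixed with a per-VNET
	 * random seed by jenkins_hash32().  The seed keeps remote
	 * senders from predicting which bucket their fragments land in.
	 */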
	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
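		/*
		 * ipq_maxoff records where the datagram ends: it stays -1
		 * until the last fragment (the one without IP_MF) arrives,
		 * and then holds that fragment's end offset.  Subsequent
		 * fragments are sanity-checked against it below.
		 */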
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

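/*
 * Each fragment mbuf stashes a pointer to its own IP header in
 * m_pkthdr.PH_loc.ptr (set above, before the header was trimmed from
 * the data area); GETIP() recovers it.
 */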
#define	GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop the fragment if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the incoming segment,
	 * then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
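	/*
	 * While concatenating, merge hardware checksum state: the
	 * capability flags are intersected so that only flags valid for
	 * every fragment survive, and the partial sums are added up;
	 * the carries are folded after the loop.
	 */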
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry'
	 * here (and not in the for{} loop), though it implies we are
	 * not going to reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
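	/*
	 * E.g. an accumulated sum of 0x1ffff folds as 0xffff + 0x1 =
	 * 0x10000, and then 0x0000 + 0x1 = 0x0001.
	 */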
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
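		/*
		 * MTX_DUPOK: ipq_reuse() may hold two bucket locks of
		 * this class at once.
		 */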
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After the maximum number of fragment reassembly queue entries has
 * been updated, propagate the change to UMA.  The UMA zone max has
 * slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here (0 disables
 * reassembly, -1 removes the limit), we need our own handler instead
 * of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Seek for an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

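	/*
	 * Buckets other than the caller's are only trylocked: we
	 * already hold one bucket lock, and blocking on a second one
	 * could deadlock against a thread doing the same from the
	 * other bucket.
	 */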
	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}