/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/netinet/ip_reass.c 337828 2018-08-15 02:30:11Z delphij $");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)
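/*
 * With IPREASS_NHASH_LOG2 == 10 this yields 1024 buckets; because the
 * bucket count is a power of two, IPREASS_HMASK reduces a 32-bit hash
 * to a bucket index with a single AND instead of a modulo.
 */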

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

static VNET_DEFINE(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
static VNET_DEFINE(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

static VNET_DEFINE(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq * ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))
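/*
 * Illustrative arithmetic only (actual values scale with the system's
 * nmbclusters): with nmbclusters == 65536, IP_MAXFRAGS is 2048 and
 * IP_MAXFRAGPACKETS is imin(2048, 1024 * 50) == 2048.
 */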

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

static VNET_DEFINE(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, noreass);
#define	V_noreass	VNET(noreass)

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or an
 * intermediate one, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last missing fragment,
 * the packet will be reassembled and a pointer to the new mbuf returned
 * for further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
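/*
 * M_PROTO9 is a protocol-layer private mbuf flag; ip_reass() sets it on
 * fragments whose header carried IP_MF, so the completeness check below
 * can tell whether the final fragment has been seen.
 */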
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0, never
	 * accept fragments.
	 * Also, drop the packet if it would exceed the maximum number
	 * of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && nfrags >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			IPSTAT_INC(ips_fragdropped);
			m_freem(m);
			return (NULL);
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
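	/*
	 * The key hashed above is the (source, destination, protocol, id)
	 * tuple that RFC 791 uses to identify the fragments of a single
	 * datagram; the per-VNET random seed keeps remote senders from
	 * predicting which bucket their fragments will land in.
	 */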
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define	GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))
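/*
 * GETIP() recovers the IP header pointer cached in m_pkthdr.PH_loc.ptr
 * earlier in this function, so a fragment's header remains reachable
 * even though m_data has been advanced past it.
 */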

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
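	/*
	 * Worked example with an illustrative value: if csum_data summed
	 * to 0x0002345f, one pass folds it to 0x345f + 0x2 == 0x3461,
	 * which has no bits above bit 15, so the loop terminates with a
	 * 16-bit one's-complement partial sum.
	 */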
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
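	/*
	 * Dividing by IPREASS_NHASH / 2 lets one bucket hold twice its
	 * "fair share" of the zone limit; when the zone limit is
	 * IPREASS_NHASH * 50, this works out to the 100-entry per-bucket
	 * target described above IP_MAXFRAGPACKETS (51200 / 512 == 100).
	 */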

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
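/*
 * Summary of the special values as handled below: a positive value caps
 * the zone and re-enables reassembly, 0 disables reassembly and drains
 * all queues, and -1 re-enables reassembly with no zone limit at all.
 */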
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Search for an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
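		/*
		 * Never block on another bucket's lock while holding our
		 * own: trying the lock and skipping the bucket on failure
		 * avoids deadlock between two threads reclaiming from each
		 * other's buckets.  (The bucket mutexes are created with
		 * MTX_DUPOK so that two of them may be held at once.)
		 */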
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}