FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_param.h"
38 #include "opt_mbuf_stress_test.h"
39 #include "opt_mbuf_profiling.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sysctl.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/uio.h>
52 #include <sys/vmmeter.h>
53 #include <sys/sdt.h>
54 #include <vm/vm.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/vm_page.h>
57
58 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
59 "struct mbuf *", "mbufinfo_t *",
60 "uint32_t", "uint32_t",
61 "uint16_t", "uint16_t",
62 "uint32_t", "uint32_t",
63 "uint32_t", "uint32_t");
64
65 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
66 "uint32_t", "uint32_t",
67 "uint16_t", "uint16_t",
68 "struct mbuf *", "mbufinfo_t *");
69
70 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
71 "uint32_t", "uint32_t",
72 "uint16_t", "uint16_t",
73 "struct mbuf *", "mbufinfo_t *");
74
75 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
76 "uint32_t", "uint32_t",
77 "uint16_t", "uint16_t",
78 "uint32_t", "uint32_t",
79 "struct mbuf *", "mbufinfo_t *");
80
81 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
82 "uint32_t", "uint32_t",
83 "uint16_t", "uint16_t",
84 "uint32_t", "uint32_t",
85 "uint32_t", "uint32_t",
86 "struct mbuf *", "mbufinfo_t *");
87
88 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
89 "struct mbuf *", "mbufinfo_t *",
90 "uint32_t", "uint32_t",
91 "uint32_t", "uint32_t");
92
93 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
94 "struct mbuf *", "mbufinfo_t *",
95 "uint32_t", "uint32_t",
96 "uint32_t", "uint32_t",
97 "void*", "void*");
98
99 SDT_PROBE_DEFINE(sdt, , , m__cljset);
100
101 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
102 "struct mbuf *", "mbufinfo_t *");
103
104 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
105 "struct mbuf *", "mbufinfo_t *");
106
107 #include <security/mac/mac_framework.h>
108
109 int max_linkhdr;
110 int max_protohdr;
111 int max_hdr;
112 int max_datalen;
113 #ifdef MBUF_STRESS_TEST
114 int m_defragpackets;
115 int m_defragbytes;
116 int m_defraguseless;
117 int m_defragfailure;
118 int m_defragrandomfailures;
119 #endif
120
121 /*
122 * sysctl(8) exported objects
123 */
124 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
125 &max_linkhdr, 0, "Size of largest link layer header");
126 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
127 &max_protohdr, 0, "Size of largest protocol layer header");
128 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
129 &max_hdr, 0, "Size of largest link plus protocol header");
130 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
131 &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
132 #ifdef MBUF_STRESS_TEST
133 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
134 &m_defragpackets, 0, "");
135 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
136 &m_defragbytes, 0, "");
137 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
138 &m_defraguseless, 0, "");
139 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
140 &m_defragfailure, 0, "");
141 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
142 &m_defragrandomfailures, 0, "");
143 #endif
144
145 /*
146 * Ensure the correct size of various mbuf parameters. They could be off
147 * due to compiler-induced padding and alignment artifacts.
148 */
149 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
150 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
151
152 /*
153 * mbuf data storage should be 64-bit aligned regardless of architectural
154 * pointer size; check this is the case with and without a packet header.
155 */
156 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
157 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
158
159 /*
160 * While the specific values here don't matter too much (i.e., +/- a few
161 * words), we do want to ensure that changes to these values are carefully
162 * reasoned about and properly documented. This is especially the case as
163 * network-protocol and device-driver modules encode these layouts, and must
164 * be recompiled if the structures change. Check these values at compile time
165 * against the ones documented in comments in mbuf.h.
166 *
167 * NB: Possibly they should be documented there via #define's and not just
168 * comments.
169 */
170 #if defined(__LP64__)
171 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
172 CTASSERT(sizeof(struct pkthdr) == 56);
173 CTASSERT(sizeof(struct m_ext) == 160);
174 #else
175 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
176 CTASSERT(sizeof(struct pkthdr) == 48);
177 #if defined(__powerpc__) && defined(BOOKE)
178 /* PowerPC booke has 64-bit physical pointers. */
179 CTASSERT(sizeof(struct m_ext) == 184);
180 #else
181 CTASSERT(sizeof(struct m_ext) == 180);
182 #endif
183 #endif
184
185 /*
186 * Assert that the queue(3) macro entry fields are the same size as an
187 * old-style plain pointer.
188 */
189 #ifdef INVARIANTS
190 static struct mbuf __used m_assertbuf;
191 CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
192 CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
193 CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
194 CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
195 #endif
196
197 /*
198 * Attach the cluster from *m to *n, set up m_ext in *n
199 * and bump the refcount of the cluster.
200 */
201 void
202 mb_dupcl(struct mbuf *n, struct mbuf *m)
203 {
204 volatile u_int *refcnt;
205
206 KASSERT(m->m_flags & (M_EXT|M_EXTPG),
207 ("%s: M_EXT|M_EXTPG not set on %p", __func__, m));
208 KASSERT(!(n->m_flags & (M_EXT|M_EXTPG)),
209 ("%s: M_EXT|M_EXTPG set on %p", __func__, n));
210
211 /*
212 * Cache access optimization.
213 *
214 * o Regular M_EXT storage doesn't need a full copy of m_ext, since
215 * the holder of the 'ext_count' is responsible for carrying the free
216 * routine and its arguments.
217 * o M_EXTPG data is split between the main part of the mbuf and m_ext;
218 * the main part is copied in full, the m_ext part is handled like M_EXT.
219 * o EXT_EXTREF, where 'ext_cnt' doesn't point into the mbuf at all, is
220 * special - it needs a full copy of m_ext in each mbuf, since any
221 * copy could end up as the last one to free.
222 */
223 if (m->m_flags & M_EXTPG) {
224 bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
225 __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
226 bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
227 } else if (m->m_ext.ext_type == EXT_EXTREF)
228 bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
229 else
230 bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
231
232 n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);
233
234 /* See if this is the mbuf that holds the embedded refcount. */
235 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
236 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
237 n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
238 } else {
239 KASSERT(m->m_ext.ext_cnt != NULL,
240 ("%s: no refcounting pointer on %p", __func__, m));
241 refcnt = m->m_ext.ext_cnt;
242 }
243
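/*
 * Bump the reference count.  When the embedded count is 1 we are the
 * only holder, so no other CPU can be racing with us and a plain
 * increment suffices; otherwise the count may be shared and must be
 * updated atomically.
 */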
244 if (*refcnt == 1)
245 *refcnt += 1;
246 else
247 atomic_add_int(refcnt, 1);
248 }
249
250 void
251 m_demote_pkthdr(struct mbuf *m)
252 {
253
254 M_ASSERTPKTHDR(m);
255
256 m_tag_delete_chain(m, NULL);
257 m->m_flags &= ~M_PKTHDR;
258 bzero(&m->m_pkthdr, sizeof(struct pkthdr));
259 }
260
261 /*
262 * Clean up mbuf (chain) from any tags and packet headers.
263 * If "all" is set then the first mbuf in the chain will be
264 * cleaned too.
265 */
266 void
267 m_demote(struct mbuf *m0, int all, int flags)
268 {
269 struct mbuf *m;
270
271 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
272 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
273 __func__, m, m0));
274 if (m->m_flags & M_PKTHDR)
275 m_demote_pkthdr(m);
276 m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE |
277 M_EXTPG | flags);
278 }
279 }
280
281 /*
282 * Sanity checks on mbuf (chain) for use in KASSERT() and general
283 * debugging.
284 * Returns 1 when all tests pass; on failure it panics (with INVARIANTS)
285 * or prints a warning (without). sanitize: 0 to run M_SANITY_ACTION on
286 * failure, 1 to poison the offending fields so they blow up later.
287 */
288 int
289 m_sanity(struct mbuf *m0, int sanitize)
290 {
291 struct mbuf *m;
292 caddr_t a, b;
293 int pktlen = 0;
294
295 #ifdef INVARIANTS
296 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
297 #else
298 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
299 #endif
300
301 for (m = m0; m != NULL; m = m->m_next) {
302 /*
303 * Basic pointer checks. If any of these fails then some
304 * unrelated kernel memory before or after us is trashed.
305 * No way to recover from that.
306 */
307 a = M_START(m);
308 b = a + M_SIZE(m);
309 if ((caddr_t)m->m_data < a)
310 M_SANITY_ACTION("m_data outside mbuf data range left");
311 if ((caddr_t)m->m_data > b)
312 M_SANITY_ACTION("m_data outside mbuf data range right");
313 if ((caddr_t)m->m_data + m->m_len > b)
314 M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
315
316 /* m->m_nextpkt may only be set on first mbuf in chain. */
317 if (m != m0 && m->m_nextpkt != NULL) {
318 if (sanitize) {
319 m_freem(m->m_nextpkt);
320 m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
321 } else
322 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
323 }
324
325 /* packet length (not mbuf length!) calculation */
326 if (m0->m_flags & M_PKTHDR)
327 pktlen += m->m_len;
328
329 /* m_tags may only be attached to first mbuf in chain. */
330 if (m != m0 && m->m_flags & M_PKTHDR &&
331 !SLIST_EMPTY(&m->m_pkthdr.tags)) {
332 if (sanitize) {
333 m_tag_delete_chain(m, NULL);
334 /* put in 0xDEADC0DE perhaps? */
335 } else
336 M_SANITY_ACTION("m_tags on in-chain mbuf");
337 }
338
339 /* M_PKTHDR may only be set on first mbuf in chain */
340 if (m != m0 && m->m_flags & M_PKTHDR) {
341 if (sanitize) {
342 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
343 m->m_flags &= ~M_PKTHDR;
344 /* put in 0xDEADC0DE and leave hdr flag in */
345 } else
346 M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
347 }
348 }
349 m = m0;
350 if (pktlen && pktlen != m->m_pkthdr.len) {
351 if (sanitize)
352 m->m_pkthdr.len = 0;
353 else
354 M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
355 }
356 return (1);
357
358 #undef M_SANITY_ACTION
359 }
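/*
 * Illustrative use (hypothetical debugging call site); m_sanity()
 * returns 1 when the whole chain checks out:
 *
 *	KASSERT(m_sanity(m0, 0), ("%s: bad mbuf chain", __func__));
 */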
360
361 /*
362 * Non-inlined part of m_init().
363 */
364 int
365 m_pkthdr_init(struct mbuf *m, int how)
366 {
367 #ifdef MAC
368 int error;
369 #endif
370 m->m_data = m->m_pktdat;
371 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
372 #ifdef NUMA
373 m->m_pkthdr.numa_domain = M_NODOM;
374 #endif
375 #ifdef MAC
376 /* If the label init fails, fail the alloc */
377 error = mac_mbuf_init(m, how);
378 if (error)
379 return (error);
380 #endif
381
382 return (0);
383 }
384
385 /*
386 * "Move" mbuf pkthdr from "from" to "to".
387 * "from" must have M_PKTHDR set, and "to" must be empty.
388 */
389 void
390 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
391 {
392
393 #if 0
394 /* see below for why these are not enabled */
395 M_ASSERTPKTHDR(to);
396 /* Note: with MAC, this may not be a good assertion. */
397 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
398 ("m_move_pkthdr: to has tags"));
399 #endif
400 #ifdef MAC
401 /*
402 * XXXMAC: It could be this should also occur for non-MAC?
403 */
404 if (to->m_flags & M_PKTHDR)
405 m_tag_delete_chain(to, NULL);
406 #endif
407 to->m_flags = (from->m_flags & M_COPYFLAGS) |
408 (to->m_flags & (M_EXT | M_EXTPG));
409 if ((to->m_flags & M_EXT) == 0)
410 to->m_data = to->m_pktdat;
411 to->m_pkthdr = from->m_pkthdr; /* especially tags */
412 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
413 from->m_flags &= ~M_PKTHDR;
414 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
415 from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
416 from->m_pkthdr.snd_tag = NULL;
417 }
418 }
419
420 /*
421 * Duplicate "from"'s mbuf pkthdr in "to".
422 * "from" must have M_PKTHDR set, and "to" must be empty.
423 * In particular, this does a deep copy of the packet tags.
424 */
425 int
426 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
427 {
428
429 #if 0
430 /*
431 * The mbuf allocator only initializes the pkthdr
432 * when the mbuf is allocated with m_gethdr(). Many users
433 * (e.g. m_copy*, m_prepend) use m_get() and then
434 * smash the pkthdr as needed causing these
435 * assertions to trip. For now just disable them.
436 */
437 M_ASSERTPKTHDR(to);
438 /* Note: with MAC, this may not be a good assertion. */
439 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
440 #endif
441 MBUF_CHECKSLEEP(how);
442 #ifdef MAC
443 if (to->m_flags & M_PKTHDR)
444 m_tag_delete_chain(to, NULL);
445 #endif
446 to->m_flags = (from->m_flags & M_COPYFLAGS) |
447 (to->m_flags & (M_EXT | M_EXTPG));
448 if ((to->m_flags & M_EXT) == 0)
449 to->m_data = to->m_pktdat;
450 to->m_pkthdr = from->m_pkthdr;
451 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
452 m_snd_tag_ref(from->m_pkthdr.snd_tag);
453 SLIST_INIT(&to->m_pkthdr.tags);
454 return (m_tag_copy_chain(to, from, how));
455 }
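/*
 * Illustrative sketch (hypothetical caller): clone a packet header,
 * including a deep copy of its tags, into a fresh mbuf; m_dup_pkthdr()
 * returns 0 when the tag copy fails:
 *
 *	n = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (n == NULL || !m_dup_pkthdr(n, m, M_NOWAIT))
 *		(allocation failed; free n if it was allocated)
 */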
456
457 /*
458 * Lesser-used path for M_PREPEND:
459 * allocate new mbuf to prepend to chain,
460 * copy junk along.
461 */
462 struct mbuf *
463 m_prepend(struct mbuf *m, int len, int how)
464 {
465 struct mbuf *mn;
466
467 if (m->m_flags & M_PKTHDR)
468 mn = m_gethdr(how, m->m_type);
469 else
470 mn = m_get(how, m->m_type);
471 if (mn == NULL) {
472 m_freem(m);
473 return (NULL);
474 }
475 if (m->m_flags & M_PKTHDR)
476 m_move_pkthdr(mn, m);
477 mn->m_next = m;
478 m = mn;
479 if (len < M_SIZE(m))
480 M_ALIGN(m, len);
481 m->m_len = len;
482 return (m);
483 }
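/*
 * Callers normally use the M_PREPEND() macro, which only falls back to
 * this routine when the existing leading space is insufficient.
 * Illustrative direct use (hypothetical header length hdrlen):
 *
 *	m = m_prepend(m, hdrlen, M_NOWAIT);
 *	if (m == NULL)
 *		(the whole chain has already been freed)
 */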
484
485 /*
486 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
487 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
488 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
489 * Note that the copy is read-only, because clusters are not copied,
490 * only their reference counts are incremented.
491 */
492 struct mbuf *
493 m_copym(struct mbuf *m, int off0, int len, int wait)
494 {
495 struct mbuf *n, **np;
496 int off = off0;
497 struct mbuf *top;
498 int copyhdr = 0;
499
500 KASSERT(off >= 0, ("m_copym, negative off %d", off));
501 KASSERT(len >= 0, ("m_copym, negative len %d", len));
502 MBUF_CHECKSLEEP(wait);
503 if (off == 0 && m->m_flags & M_PKTHDR)
504 copyhdr = 1;
505 while (off > 0) {
506 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
507 if (off < m->m_len)
508 break;
509 off -= m->m_len;
510 m = m->m_next;
511 }
512 np = ⊤
513 top = NULL;
514 while (len > 0) {
515 if (m == NULL) {
516 KASSERT(len == M_COPYALL,
517 ("m_copym, length > size of mbuf chain"));
518 break;
519 }
520 if (copyhdr)
521 n = m_gethdr(wait, m->m_type);
522 else
523 n = m_get(wait, m->m_type);
524 *np = n;
525 if (n == NULL)
526 goto nospace;
527 if (copyhdr) {
528 if (!m_dup_pkthdr(n, m, wait))
529 goto nospace;
530 if (len == M_COPYALL)
531 n->m_pkthdr.len -= off0;
532 else
533 n->m_pkthdr.len = len;
534 copyhdr = 0;
535 }
536 n->m_len = min(len, m->m_len - off);
537 if (m->m_flags & (M_EXT|M_EXTPG)) {
538 n->m_data = m->m_data + off;
539 mb_dupcl(n, m);
540 } else
541 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
542 (u_int)n->m_len);
543 if (len != M_COPYALL)
544 len -= n->m_len;
545 off = 0;
546 m = m->m_next;
547 np = &n->m_next;
548 }
549
550 return (top);
551 nospace:
552 m_freem(top);
553 return (NULL);
554 }
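/*
 * Illustrative sketch (hypothetical caller): take a read-only,
 * reference-counted copy of an entire packet, e.g. for retransmission:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		(allocation failed; the original chain is untouched)
 */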
555
556 /*
557 * Copy an entire packet, including header (which must be present).
558 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
559 * Note that the copy is read-only, because clusters are not copied,
560 * only their reference counts are incremented.
561 * Preserve alignment of the first mbuf so if the creator has left
562 * some room at the beginning (e.g. for inserting protocol headers)
563 * the copies still have the room available.
564 */
565 struct mbuf *
566 m_copypacket(struct mbuf *m, int how)
567 {
568 struct mbuf *top, *n, *o;
569
570 MBUF_CHECKSLEEP(how);
571 n = m_get(how, m->m_type);
572 top = n;
573 if (n == NULL)
574 goto nospace;
575
576 if (!m_dup_pkthdr(n, m, how))
577 goto nospace;
578 n->m_len = m->m_len;
579 if (m->m_flags & (M_EXT|M_EXTPG)) {
580 n->m_data = m->m_data;
581 mb_dupcl(n, m);
582 } else {
583 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
584 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
585 }
586
587 m = m->m_next;
588 while (m) {
589 o = m_get(how, m->m_type);
590 if (o == NULL)
591 goto nospace;
592
593 n->m_next = o;
594 n = n->m_next;
595
596 n->m_len = m->m_len;
597 if (m->m_flags & (M_EXT|M_EXTPG)) {
598 n->m_data = m->m_data;
599 mb_dupcl(n, m);
600 } else {
601 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
602 }
603
604 m = m->m_next;
605 }
606 return (top);
607 nospace:
608 m_freem(top);
609 return (NULL);
610 }
611
612 static void
613 m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
614 {
615 struct iovec iov;
616 struct uio uio;
617 int error;
618
619 KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
620 KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
621 KASSERT(off < m->m_len,
622 ("m_copyfromunmapped: off exceeds mbuf length"));
623 iov.iov_base = cp;
624 iov.iov_len = len;
625 uio.uio_resid = len;
626 uio.uio_iov = &iov;
627 uio.uio_segflg = UIO_SYSSPACE;
628 uio.uio_iovcnt = 1;
629 uio.uio_offset = 0;
630 uio.uio_rw = UIO_READ;
631 error = m_unmappedtouio(m, off, &uio, len);
632 KASSERT(error == 0, ("m_unmappedtouio failed: off %d, len %d", off,
633 len));
634 }
635
636 /*
637 * Copy data from an mbuf chain starting "off" bytes from the beginning,
638 * continuing for "len" bytes, into the indicated buffer.
639 */
640 void
641 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
642 {
643 u_int count;
644
645 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
646 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
647 while (off > 0) {
648 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
649 if (off < m->m_len)
650 break;
651 off -= m->m_len;
652 m = m->m_next;
653 }
654 while (len > 0) {
655 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
656 count = min(m->m_len - off, len);
657 if ((m->m_flags & M_EXTPG) != 0)
658 m_copyfromunmapped(m, off, count, cp);
659 else
660 bcopy(mtod(m, caddr_t) + off, cp, count);
661 len -= count;
662 cp += count;
663 off = 0;
664 m = m->m_next;
665 }
666 }
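/*
 * Illustrative sketch (hypothetical caller): linearize a fixed-size
 * header from the chain into a stack buffer before parsing it, where
 * struct hdr is a hypothetical header type:
 *
 *	struct hdr h;
 *
 *	m_copydata(m, 0, sizeof(h), (caddr_t)&h);
 */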
667
668 /*
669 * Copy a packet header mbuf chain into a completely new chain, including
670 * copying any mbuf clusters. Use this instead of m_copypacket() when
671 * you need a writable copy of an mbuf chain.
672 */
673 struct mbuf *
674 m_dup(const struct mbuf *m, int how)
675 {
676 struct mbuf **p, *top = NULL;
677 int remain, moff, nsize;
678
679 MBUF_CHECKSLEEP(how);
680 /* Sanity check */
681 if (m == NULL)
682 return (NULL);
683 M_ASSERTPKTHDR(m);
684
685 /* While there's more data, get a new mbuf, tack it on, and fill it */
686 remain = m->m_pkthdr.len;
687 moff = 0;
688 p = ⊤
689 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
690 struct mbuf *n;
691
692 /* Get the next new mbuf */
693 if (remain >= MINCLSIZE) {
694 n = m_getcl(how, m->m_type, 0);
695 nsize = MCLBYTES;
696 } else {
697 n = m_get(how, m->m_type);
698 nsize = MLEN;
699 }
700 if (n == NULL)
701 goto nospace;
702
703 if (top == NULL) { /* First one, must be PKTHDR */
704 if (!m_dup_pkthdr(n, m, how)) {
705 m_free(n);
706 goto nospace;
707 }
708 if ((n->m_flags & M_EXT) == 0)
709 nsize = MHLEN;
710 n->m_flags &= ~M_RDONLY;
711 }
712 n->m_len = 0;
713
714 /* Link it into the new chain */
715 *p = n;
716 p = &n->m_next;
717
718 /* Copy data from original mbuf(s) into new mbuf */
719 while (n->m_len < nsize && m != NULL) {
720 int chunk = min(nsize - n->m_len, m->m_len - moff);
721
722 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
723 moff += chunk;
724 n->m_len += chunk;
725 remain -= chunk;
726 if (moff == m->m_len) {
727 m = m->m_next;
728 moff = 0;
729 }
730 }
731
732 /* Check correct total mbuf length */
733 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
734 ("%s: bogus m_pkthdr.len", __func__));
735 }
736 return (top);
737
738 nospace:
739 m_freem(top);
740 return (NULL);
741 }
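/*
 * Illustrative sketch (hypothetical caller): when the packet must be
 * modified but its clusters may be shared, take a deep writable copy:
 *
 *	n = m_dup(m, M_NOWAIT);
 *	if (n != NULL) {
 *		m_freem(m);
 *		m = n;
 *	}
 */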
742
743 /*
744 * Concatenate mbuf chain n to m.
745 * Both chains must be of the same type (e.g. MT_DATA).
746 * Any m_pkthdr is not updated.
747 */
748 void
749 m_cat(struct mbuf *m, struct mbuf *n)
750 {
751 while (m->m_next)
752 m = m->m_next;
753 while (n) {
754 if (!M_WRITABLE(m) ||
755 (n->m_flags & M_EXTPG) != 0 ||
756 M_TRAILINGSPACE(m) < n->m_len) {
757 /* just join the two chains */
758 m->m_next = n;
759 return;
760 }
761 /* splat the data from one into the other */
762 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
763 (u_int)n->m_len);
764 m->m_len += n->m_len;
765 n = m_free(n);
766 }
767 }
768
769 /*
770 * Concatenate two pkthdr mbuf chains.
771 */
772 void
773 m_catpkt(struct mbuf *m, struct mbuf *n)
774 {
775
776 M_ASSERTPKTHDR(m);
777 M_ASSERTPKTHDR(n);
778
779 m->m_pkthdr.len += n->m_pkthdr.len;
780 m_demote(n, 1, 0);
781
782 m_cat(m, n);
783 }
784
785 void
786 m_adj(struct mbuf *mp, int req_len)
787 {
788 int len = req_len;
789 struct mbuf *m;
790 int count;
791
792 if ((m = mp) == NULL)
793 return;
794 if (len >= 0) {
795 /*
796 * Trim from head.
797 */
798 while (m != NULL && len > 0) {
799 if (m->m_len <= len) {
800 len -= m->m_len;
801 m->m_len = 0;
802 m = m->m_next;
803 } else {
804 m->m_len -= len;
805 m->m_data += len;
806 len = 0;
807 }
808 }
809 if (mp->m_flags & M_PKTHDR)
810 mp->m_pkthdr.len -= (req_len - len);
811 } else {
812 /*
813 * Trim from tail. Scan the mbuf chain,
814 * calculating its length and finding the last mbuf.
815 * If the adjustment only affects this mbuf, then just
816 * adjust and return. Otherwise, rescan and truncate
817 * after the remaining size.
818 */
819 len = -len;
820 count = 0;
821 for (;;) {
822 count += m->m_len;
823 if (m->m_next == (struct mbuf *)0)
824 break;
825 m = m->m_next;
826 }
827 if (m->m_len >= len) {
828 m->m_len -= len;
829 if (mp->m_flags & M_PKTHDR)
830 mp->m_pkthdr.len -= len;
831 return;
832 }
833 count -= len;
834 if (count < 0)
835 count = 0;
836 /*
837 * Correct length for chain is "count".
838 * Find the mbuf with last data, adjust its length,
839 * and toss data from remaining mbufs on chain.
840 */
841 m = mp;
842 if (m->m_flags & M_PKTHDR)
843 m->m_pkthdr.len = count;
844 for (; m; m = m->m_next) {
845 if (m->m_len >= count) {
846 m->m_len = count;
847 if (m->m_next != NULL) {
848 m_freem(m->m_next);
849 m->m_next = NULL;
850 }
851 break;
852 }
853 count -= m->m_len;
854 }
855 }
856 }
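/*
 * Illustrative use: strip a 14-byte link-layer header from the front
 * of a packet and a 4-byte checksum trailer from its end (constants
 * from net/ethernet.h):
 *
 *	m_adj(m, ETHER_HDR_LEN);	(positive: trim from head)
 *	m_adj(m, -ETHER_CRC_LEN);	(negative: trim from tail)
 */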
857
858 /*
859 * Rearrange an mbuf chain so that len bytes are contiguous
860 * and in the data area of an mbuf (so that mtod will work
861 * for a structure of size len). Returns the resulting
862 * mbuf chain on success, frees it and returns NULL on failure.
863 * If there is room, it will add up to max_protohdr-len extra bytes to the
864 * contiguous region in an attempt to avoid being called next time.
865 */
866 struct mbuf *
867 m_pullup(struct mbuf *n, int len)
868 {
869 struct mbuf *m;
870 int count;
871 int space;
872
873 KASSERT((n->m_flags & M_EXTPG) == 0,
874 ("%s: unmapped mbuf %p", __func__, n));
875
876 /*
877 * If first mbuf has no cluster, and has room for len bytes
878 * without shifting current data, pullup into it,
879 * otherwise allocate a new mbuf to prepend to the chain.
880 */
881 if ((n->m_flags & M_EXT) == 0 &&
882 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
883 if (n->m_len >= len)
884 return (n);
885 m = n;
886 n = n->m_next;
887 len -= m->m_len;
888 } else {
889 if (len > MHLEN)
890 goto bad;
891 m = m_get(M_NOWAIT, n->m_type);
892 if (m == NULL)
893 goto bad;
894 if (n->m_flags & M_PKTHDR)
895 m_move_pkthdr(m, n);
896 }
897 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
898 do {
899 count = min(min(max(len, max_protohdr), space), n->m_len);
900 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
901 (u_int)count);
902 len -= count;
903 m->m_len += count;
904 n->m_len -= count;
905 space -= count;
906 if (n->m_len)
907 n->m_data += count;
908 else
909 n = m_free(n);
910 } while (len > 0 && n);
911 if (len > 0) {
912 (void) m_free(m);
913 goto bad;
914 }
915 m->m_next = n;
916 return (m);
917 bad:
918 m_freem(n);
919 return (NULL);
920 }
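/*
 * Illustrative use (the classic protocol-input pattern): ensure the
 * first mbuf holds a full IP header before dereferencing it:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		(m_pullup freed the chain on failure)
 */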
921
922 /*
923 * Like m_pullup(), except a new mbuf is always allocated, and we allow
924 * the amount of empty space before the data in the new mbuf to be specified
925 * (in the event that the caller expects to prepend later).
926 */
927 struct mbuf *
928 m_copyup(struct mbuf *n, int len, int dstoff)
929 {
930 struct mbuf *m;
931 int count, space;
932
933 if (len > (MHLEN - dstoff))
934 goto bad;
935 m = m_get(M_NOWAIT, n->m_type);
936 if (m == NULL)
937 goto bad;
938 if (n->m_flags & M_PKTHDR)
939 m_move_pkthdr(m, n);
940 m->m_data += dstoff;
941 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
942 do {
943 count = min(min(max(len, max_protohdr), space), n->m_len);
944 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
945 (unsigned)count);
946 len -= count;
947 m->m_len += count;
948 n->m_len -= count;
949 space -= count;
950 if (n->m_len)
951 n->m_data += count;
952 else
953 n = m_free(n);
954 } while (len > 0 && n);
955 if (len > 0) {
956 (void) m_free(m);
957 goto bad;
958 }
959 m->m_next = n;
960 return (m);
961 bad:
962 m_freem(n);
963 return (NULL);
964 }
965
966 /*
967 * Partition an mbuf chain in two pieces, returning the tail --
968 * all but the first len0 bytes. In case of failure, it returns NULL and
969 * attempts to restore the chain to its original state.
970 *
971 * Note that the resulting mbufs might be read-only, because the new
972 * mbuf can end up sharing an mbuf cluster with the original mbuf if
973 * the "breaking point" happens to lie within a cluster mbuf. Use the
974 * M_WRITABLE() macro to check for this case.
975 */
976 struct mbuf *
977 m_split(struct mbuf *m0, int len0, int wait)
978 {
979 struct mbuf *m, *n;
980 u_int len = len0, remain;
981
982 MBUF_CHECKSLEEP(wait);
983 for (m = m0; m && len > m->m_len; m = m->m_next)
984 len -= m->m_len;
985 if (m == NULL)
986 return (NULL);
987 remain = m->m_len - len;
988 if (m0->m_flags & M_PKTHDR && remain == 0) {
989 n = m_gethdr(wait, m0->m_type);
990 if (n == NULL)
991 return (NULL);
992 n->m_next = m->m_next;
993 m->m_next = NULL;
994 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
995 n->m_pkthdr.snd_tag =
996 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
997 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
998 } else
999 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1000 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1001 m0->m_pkthdr.len = len0;
1002 return (n);
1003 } else if (m0->m_flags & M_PKTHDR) {
1004 n = m_gethdr(wait, m0->m_type);
1005 if (n == NULL)
1006 return (NULL);
1007 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1008 n->m_pkthdr.snd_tag =
1009 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1010 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1011 } else
1012 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1013 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1014 m0->m_pkthdr.len = len0;
1015 if (m->m_flags & (M_EXT|M_EXTPG))
1016 goto extpacket;
1017 if (remain > MHLEN) {
1018 /* m can't be the lead packet */
1019 M_ALIGN(n, 0);
1020 n->m_next = m_split(m, len, wait);
1021 if (n->m_next == NULL) {
1022 (void) m_free(n);
1023 return (NULL);
1024 } else {
1025 n->m_len = 0;
1026 return (n);
1027 }
1028 } else
1029 M_ALIGN(n, remain);
1030 } else if (remain == 0) {
1031 n = m->m_next;
1032 m->m_next = NULL;
1033 return (n);
1034 } else {
1035 n = m_get(wait, m->m_type);
1036 if (n == NULL)
1037 return (NULL);
1038 M_ALIGN(n, remain);
1039 }
1040 extpacket:
1041 if (m->m_flags & (M_EXT|M_EXTPG)) {
1042 n->m_data = m->m_data + len;
1043 mb_dupcl(n, m);
1044 } else {
1045 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1046 }
1047 n->m_len = remain;
1048 m->m_len = len;
1049 n->m_next = m->m_next;
1050 m->m_next = NULL;
1051 return (n);
1052 }
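/*
 * Illustrative sketch (hypothetical caller): split a record off the
 * front of a chain, leaving the first reclen bytes in m and getting
 * the remainder back in n:
 *
 *	n = m_split(m, reclen, M_NOWAIT);
 *	if (n == NULL)
 *		(allocation failed; m was left in its original state)
 */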
1053 /*
1054 * Routine to copy from device local memory into mbufs.
1055 * Note that the `off' argument is the offset into the first mbuf of the
1056 * target chain at which to begin copying the data.
1057 */
1058 struct mbuf *
1059 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1060 void (*copy)(char *from, caddr_t to, u_int len))
1061 {
1062 struct mbuf *m;
1063 struct mbuf *top = NULL, **mp = ⊤
1064 int len;
1065
1066 if (off < 0 || off > MHLEN)
1067 return (NULL);
1068
1069 while (totlen > 0) {
1070 if (top == NULL) { /* First one, must be PKTHDR */
1071 if (totlen + off >= MINCLSIZE) {
1072 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1073 len = MCLBYTES;
1074 } else {
1075 m = m_gethdr(M_NOWAIT, MT_DATA);
1076 len = MHLEN;
1077
1078 /* Place initial small packet/header at end of mbuf */
1079 if (m && totlen + off + max_linkhdr <= MHLEN) {
1080 m->m_data += max_linkhdr;
1081 len -= max_linkhdr;
1082 }
1083 }
1084 if (m == NULL)
1085 return (NULL);
1086 m->m_pkthdr.rcvif = ifp;
1087 m->m_pkthdr.len = totlen;
1088 } else {
1089 if (totlen + off >= MINCLSIZE) {
1090 m = m_getcl(M_NOWAIT, MT_DATA, 0);
1091 len = MCLBYTES;
1092 } else {
1093 m = m_get(M_NOWAIT, MT_DATA);
1094 len = MLEN;
1095 }
1096 if (m == NULL) {
1097 m_freem(top);
1098 return (NULL);
1099 }
1100 }
1101 if (off) {
1102 m->m_data += off;
1103 len -= off;
1104 off = 0;
1105 }
1106 m->m_len = len = min(totlen, len);
1107 if (copy)
1108 copy(buf, mtod(m, caddr_t), (u_int)len);
1109 else
1110 bcopy(buf, mtod(m, caddr_t), (u_int)len);
1111 buf += len;
1112 *mp = m;
1113 mp = &m->m_next;
1114 totlen -= len;
1115 }
1116 return (top);
1117 }
1118
1119 /*
1120 * Copy data from a buffer back into the indicated mbuf chain,
1121 * starting "off" bytes from the beginning, extending the mbuf
1122 * chain if necessary.
1123 */
1124 void
1125 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1126 {
1127 int mlen;
1128 struct mbuf *m = m0, *n;
1129 int totlen = 0;
1130
1131 if (m0 == NULL)
1132 return;
1133 while (off > (mlen = m->m_len)) {
1134 off -= mlen;
1135 totlen += mlen;
1136 if (m->m_next == NULL) {
1137 n = m_get(M_NOWAIT, m->m_type);
1138 if (n == NULL)
1139 goto out;
1140 bzero(mtod(n, caddr_t), MLEN);
1141 n->m_len = min(MLEN, len + off);
1142 m->m_next = n;
1143 }
1144 m = m->m_next;
1145 }
1146 while (len > 0) {
1147 if (m->m_next == NULL && (len > m->m_len - off)) {
1148 m->m_len += min(len - (m->m_len - off),
1149 M_TRAILINGSPACE(m));
1150 }
1151 mlen = min(m->m_len - off, len);
1152 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1153 cp += mlen;
1154 len -= mlen;
1155 mlen += off;
1156 off = 0;
1157 totlen += mlen;
1158 if (len == 0)
1159 break;
1160 if (m->m_next == NULL) {
1161 n = m_get(M_NOWAIT, m->m_type);
1162 if (n == NULL)
1163 break;
1164 n->m_len = min(MLEN, len);
1165 m->m_next = n;
1166 }
1167 m = m->m_next;
1168 }
1169 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1170 m->m_pkthdr.len = totlen;
1171 }
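/*
 * Illustrative sketch (hypothetical caller): overwrite a checksum
 * field in place at a known offset ckoff within the packet:
 *
 *	uint16_t sum;	(computed elsewhere)
 *
 *	m_copyback(m, ckoff, sizeof(sum), (c_caddr_t)&sum);
 */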
1172
1173 /*
1174 * Append the specified data to the indicated mbuf chain,
1175 * extending the mbuf chain if the new data does not fit in
1176 * existing space.
1177 *
1178 * Return 1 if able to complete the job; otherwise 0.
1179 */
1180 int
1181 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1182 {
1183 struct mbuf *m, *n;
1184 int remainder, space;
1185
1186 for (m = m0; m->m_next != NULL; m = m->m_next)
1187 ;
1188 remainder = len;
1189 space = M_TRAILINGSPACE(m);
1190 if (space > 0) {
1191 /*
1192 * Copy into available space.
1193 */
1194 if (space > remainder)
1195 space = remainder;
1196 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1197 m->m_len += space;
1198 cp += space, remainder -= space;
1199 }
1200 while (remainder > 0) {
1201 /*
1202 * Allocate a new mbuf; could check space
1203 * and allocate a cluster instead.
1204 */
1205 n = m_get(M_NOWAIT, m->m_type);
1206 if (n == NULL)
1207 break;
1208 n->m_len = min(MLEN, remainder);
1209 bcopy(cp, mtod(n, caddr_t), n->m_len);
1210 cp += n->m_len, remainder -= n->m_len;
1211 m->m_next = n;
1212 m = n;
1213 }
1214 if (m0->m_flags & M_PKTHDR)
1215 m0->m_pkthdr.len += len - remainder;
1216 return (remainder == 0);
1217 }
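/*
 * Illustrative sketch (hypothetical caller): append a trailer trl,
 * remembering that a zero return means a partial append:
 *
 *	if (!m_append(m, sizeof(trl), (c_caddr_t)&trl))
 *		(out of mbufs; only part of trl may have been added)
 */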
1218
1219 /*
1220 * Apply function f to the data in an mbuf chain starting "off" bytes from
1221 * the beginning, continuing for "len" bytes.
1222 */
1223 int
1224 m_apply(struct mbuf *m, int off, int len,
1225 int (*f)(void *, void *, u_int), void *arg)
1226 {
1227 u_int count;
1228 int rval;
1229
1230 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1231 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1232 while (off > 0) {
1233 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1234 if (off < m->m_len)
1235 break;
1236 off -= m->m_len;
1237 m = m->m_next;
1238 }
1239 while (len > 0) {
1240 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1241 count = min(m->m_len - off, len);
1242 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1243 if (rval)
1244 return (rval);
1245 len -= count;
1246 off = 0;
1247 m = m->m_next;
1248 }
1249 return (0);
1250 }
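/*
 * Illustrative sketch: walk a region of the chain without linearizing
 * it, feeding each contiguous piece to a callback.  Here digest_cb is
 * a hypothetical int (*)(void *, void *, u_int) that returns 0 to
 * continue:
 *
 *	error = m_apply(m, off, len, digest_cb, ctx);
 */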
1251
1252 /*
1253 * Return a pointer to mbuf/offset of location in mbuf chain.
1254 */
1255 struct mbuf *
1256 m_getptr(struct mbuf *m, int loc, int *off)
1257 {
1258
1259 while (loc >= 0) {
1260 /* Normal end of search. */
1261 if (m->m_len > loc) {
1262 *off = loc;
1263 return (m);
1264 } else {
1265 loc -= m->m_len;
1266 if (m->m_next == NULL) {
1267 if (loc == 0) {
1268 /* Point at the end of valid data. */
1269 *off = m->m_len;
1270 return (m);
1271 }
1272 return (NULL);
1273 }
1274 m = m->m_next;
1275 }
1276 }
1277 return (NULL);
1278 }
1279
1280 void
1281 m_print(const struct mbuf *m, int maxlen)
1282 {
1283 int len;
1284 int pdata;
1285 const struct mbuf *m2;
1286
1287 if (m == NULL) {
1288 printf("mbuf: %p\n", m);
1289 return;
1290 }
1291
1292 if (m->m_flags & M_PKTHDR)
1293 len = m->m_pkthdr.len;
1294 else
1295 len = -1;
1296 m2 = m;
1297 while (m2 != NULL && (len == -1 || len)) {
1298 pdata = m2->m_len;
1299 if (maxlen != -1 && pdata > maxlen)
1300 pdata = maxlen;
1301 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1302 m2->m_next, m2->m_flags, "\2\20freelist\17skipfw"
1303 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1304 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1305 if (pdata)
1306 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1307 if (len != -1)
1308 len -= m2->m_len;
1309 m2 = m2->m_next;
1310 }
1311 if (len > 0)
1312 printf("%d bytes unaccounted for.\n", len);
1313 return;
1314 }
1315
1316 u_int
1317 m_fixhdr(struct mbuf *m0)
1318 {
1319 u_int len;
1320
1321 len = m_length(m0, NULL);
1322 m0->m_pkthdr.len = len;
1323 return (len);
1324 }
1325
1326 u_int
1327 m_length(struct mbuf *m0, struct mbuf **last)
1328 {
1329 struct mbuf *m;
1330 u_int len;
1331
1332 len = 0;
1333 for (m = m0; m != NULL; m = m->m_next) {
1334 len += m->m_len;
1335 if (m->m_next == NULL)
1336 break;
1337 }
1338 if (last != NULL)
1339 *last = m;
1340 return (len);
1341 }
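/*
 * Illustrative use: recompute the chain length and remember the last
 * mbuf for a later append:
 *
 *	struct mbuf *last;
 *	u_int len = m_length(m0, &last);
 */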
1342
1343 /*
1344 * Defragment an mbuf chain, returning the shortest possible
1345 * chain of mbufs and clusters. If allocation fails and
1346 * this cannot be completed, NULL will be returned, but
1347 * the passed in chain will be unchanged. Upon success,
1348 * the original chain will be freed, and the new chain
1349 * will be returned.
1350 *
1351 * If a chain without a packet header is passed in, the original
1352 * chain will be returned unharmed.
1353 */
1354 struct mbuf *
1355 m_defrag(struct mbuf *m0, int how)
1356 {
1357 struct mbuf *m_new = NULL, *m_final = NULL;
1358 int progress = 0, length;
1359
1360 MBUF_CHECKSLEEP(how);
1361 if (!(m0->m_flags & M_PKTHDR))
1362 return (m0);
1363
1364 m_fixhdr(m0); /* Needed sanity check */
1365
1366 #ifdef MBUF_STRESS_TEST
1367 if (m_defragrandomfailures) {
1368 int temp = arc4random() & 0xff;
1369 if (temp == 0xba)
1370 goto nospace;
1371 }
1372 #endif
1373
1374 if (m0->m_pkthdr.len > MHLEN)
1375 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1376 else
1377 m_final = m_gethdr(how, MT_DATA);
1378
1379 if (m_final == NULL)
1380 goto nospace;
1381
1382 if (m_dup_pkthdr(m_final, m0, how) == 0)
1383 goto nospace;
1384
1385 m_new = m_final;
1386
1387 while (progress < m0->m_pkthdr.len) {
1388 length = m0->m_pkthdr.len - progress;
1389 if (length > MCLBYTES)
1390 length = MCLBYTES;
1391
1392 if (m_new == NULL) {
1393 if (length > MLEN)
1394 m_new = m_getcl(how, MT_DATA, 0);
1395 else
1396 m_new = m_get(how, MT_DATA);
1397 if (m_new == NULL)
1398 goto nospace;
1399 }
1400
1401 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1402 progress += length;
1403 m_new->m_len = length;
1404 if (m_new != m_final)
1405 m_cat(m_final, m_new);
1406 m_new = NULL;
1407 }
1408 #ifdef MBUF_STRESS_TEST
1409 if (m0->m_next == NULL)
1410 m_defraguseless++;
1411 #endif
1412 m_freem(m0);
1413 m0 = m_final;
1414 #ifdef MBUF_STRESS_TEST
1415 m_defragpackets++;
1416 m_defragbytes += m0->m_pkthdr.len;
1417 #endif
1418 return (m0);
1419 nospace:
1420 #ifdef MBUF_STRESS_TEST
1421 m_defragfailure++;
1422 #endif
1423 if (m_final)
1424 m_freem(m_final);
1425 return (NULL);
1426 }
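/*
 * Illustrative sketch (a typical driver transmit path): if DMA mapping
 * fails because the chain has too many segments, compact and retry:
 *
 *	if (error == EFBIG) {
 *		n = m_defrag(m, M_NOWAIT);
 *		if (n == NULL)
 *			(drop the packet: m_freem(m))
 *		else
 *			m = n;	(m_defrag freed the original chain)
 *	}
 */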
1427
1428 /*
1429 * Return the number of fragments an mbuf will use. This is usually
1430 * used as a proxy for the number of scatter/gather elements needed by
1431 * a DMA engine to access an mbuf. In general mapped mbufs are
1432 * assumed to be backed by physically contiguous buffers that only
1433 * need a single fragment. Unmapped mbufs, on the other hand, can
1434 * span disjoint physical pages.
1435 */
1436 static int
1437 frags_per_mbuf(struct mbuf *m)
1438 {
1439 int frags;
1440
1441 if ((m->m_flags & M_EXTPG) == 0)
1442 return (1);
1443
1444 /*
1445 * The header and trailer are counted as a single fragment
1446 * each when present.
1447 *
1448 * XXX: This overestimates the number of fragments by assuming
1449 * all the backing physical pages are disjoint.
1450 */
1451 frags = 0;
1452 if (m->m_epg_hdrlen != 0)
1453 frags++;
1454 frags += m->m_epg_npgs;
1455 if (m->m_epg_trllen != 0)
1456 frags++;
1457
1458 return (frags);
1459 }
1460
1461 /*
1462 * Defragment an mbuf chain, returning at most maxfrags separate
1463 * mbufs+clusters. If this is not possible NULL is returned and
1464 * the original mbuf chain is left in its present (potentially
1465 * modified) state. We use two techniques: collapsing consecutive
1466 * mbufs and replacing consecutive mbufs by a cluster.
1467 *
1468 * NB: this should really be named m_defrag but that name is taken
1469 */
1470 struct mbuf *
1471 m_collapse(struct mbuf *m0, int how, int maxfrags)
1472 {
1473 struct mbuf *m, *n, *n2, **prev;
1474 u_int curfrags;
1475
1476 /*
1477 * Calculate the current number of frags.
1478 */
1479 curfrags = 0;
1480 for (m = m0; m != NULL; m = m->m_next)
1481 curfrags += frags_per_mbuf(m);
1482 /*
1483 * First, try to collapse mbufs. Note that we always collapse
1484 * towards the front so we don't need to deal with moving the
1485 * pkthdr. This may be suboptimal if the first mbuf has much
1486 * less data than the following.
1487 */
1488 m = m0;
1489 again:
1490 for (;;) {
1491 n = m->m_next;
1492 if (n == NULL)
1493 break;
1494 if (M_WRITABLE(m) &&
1495 n->m_len < M_TRAILINGSPACE(m)) {
1496 m_copydata(n, 0, n->m_len,
1497 mtod(m, char *) + m->m_len);
1498 m->m_len += n->m_len;
1499 m->m_next = n->m_next;
1500 curfrags -= frags_per_mbuf(n);
1501 m_free(n);
1502 if (curfrags <= maxfrags)
1503 return (m0);
1504 } else
1505 m = n;
1506 }
1507 KASSERT(maxfrags > 1,
1508 ("maxfrags %u, but normal collapse failed", maxfrags));
1509 /*
1510 * Collapse consecutive mbufs to a cluster.
1511 */
1512 prev = &m0->m_next; /* NB: not the first mbuf */
1513 while ((n = *prev) != NULL) {
1514 if ((n2 = n->m_next) != NULL &&
1515 n->m_len + n2->m_len < MCLBYTES) {
1516 m = m_getcl(how, MT_DATA, 0);
1517 if (m == NULL)
1518 goto bad;
1519 m_copydata(n, 0, n->m_len, mtod(m, char *));
1520 m_copydata(n2, 0, n2->m_len,
1521 mtod(m, char *) + n->m_len);
1522 m->m_len = n->m_len + n2->m_len;
1523 m->m_next = n2->m_next;
1524 *prev = m;
1525 curfrags += 1; /* For the new cluster */
1526 curfrags -= frags_per_mbuf(n);
1527 curfrags -= frags_per_mbuf(n2);
1528 m_free(n);
1529 m_free(n2);
1530 if (curfrags <= maxfrags)
1531 return (m0);
1532 /*
1533 * Still not there, try the normal collapse
1534 * again before we allocate another cluster.
1535 */
1536 goto again;
1537 }
1538 prev = &n->m_next;
1539 }
1540 /*
1541 * No place where we can collapse to a cluster; punt.
1542 * This can occur if, for example, you request 2 frags
1543 * but the packet requires that both be clusters (we
1544 * never reallocate the first mbuf to avoid moving the
1545 * packet header).
1546 */
1547 bad:
1548 return (NULL);
1549 }
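/*
 * Illustrative sketch (hypothetical caller): bound a chain to the
 * number of scatter/gather segments a device supports (dev_nsegs is a
 * hypothetical device limit):
 *
 *	m = m_collapse(m0, M_NOWAIT, dev_nsegs);
 *	if (m == NULL)
 *		(m0 is still live, though possibly modified)
 */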
1550
1551 #ifdef MBUF_STRESS_TEST
1552
1553 /*
1554 * Fragment an mbuf chain. There's no reason you'd ever want to do
1555 * this in normal usage, but it's great for stress testing various
1556 * mbuf consumers.
1557 *
1558 * If fragmentation is not possible, the original chain will be
1559 * returned.
1560 *
1561 * Possible length values:
1562 * 0 no fragmentation will occur
1563 * > 0 each fragment will be of the specified length
1564 * -1 each fragment will be the same random value in length
1565 * -2 each fragment's length will be entirely random
1566 * (Random values range from 1 to 256)
1567 */
1568 struct mbuf *
1569 m_fragment(struct mbuf *m0, int how, int length)
1570 {
1571 struct mbuf *m_first, *m_last;
1572 int divisor = 255, progress = 0, fraglen;
1573
1574 if (!(m0->m_flags & M_PKTHDR))
1575 return (m0);
1576
1577 if (length == 0 || length < -2)
1578 return (m0);
1579 if (length > MCLBYTES)
1580 length = MCLBYTES;
1581 if (length < 0 && divisor > MCLBYTES)
1582 divisor = MCLBYTES;
1583 if (length == -1)
1584 length = 1 + (arc4random() % divisor);
1585 if (length > 0)
1586 fraglen = length;
1587
1588 m_fixhdr(m0); /* Needed sanity check */
1589
1590 m_first = m_getcl(how, MT_DATA, M_PKTHDR);
1591 if (m_first == NULL)
1592 goto nospace;
1593
1594 if (m_dup_pkthdr(m_first, m0, how) == 0)
1595 goto nospace;
1596
1597 m_last = m_first;
1598
1599 while (progress < m0->m_pkthdr.len) {
1600 if (length == -2)
1601 fraglen = 1 + (arc4random() % divisor);
1602 if (fraglen > m0->m_pkthdr.len - progress)
1603 fraglen = m0->m_pkthdr.len - progress;
1604
1605 if (progress != 0) {
1606 struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
1607 if (m_new == NULL)
1608 goto nospace;
1609
1610 m_last->m_next = m_new;
1611 m_last = m_new;
1612 }
1613
1614 m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
1615 progress += fraglen;
1616 m_last->m_len = fraglen;
1617 }
1618 m_freem(m0);
1619 m0 = m_first;
1620 return (m0);
1621 nospace:
1622 if (m_first)
1623 m_freem(m_first);
1624 /* Return the original chain on failure */
1625 return (m0);
1626 }
1627
1628 #endif
1629
1630 /*
1631 * Free pages from mbuf_ext_pgs, assuming they were allocated via
1632 * vm_page_alloc() and aren't associated with any object. Complement
1633 * to allocator from m_uiotombuf_nomap().
1634 */
1635 void
1636 mb_free_mext_pgs(struct mbuf *m)
1637 {
1638 vm_page_t pg;
1639
1640 M_ASSERTEXTPG(m);
1641 for (int i = 0; i < m->m_epg_npgs; i++) {
1642 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1643 vm_page_unwire_noq(pg);
1644 vm_page_free(pg);
1645 }
1646 }
1647
1648 static struct mbuf *
1649 m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
1650 {
1651 struct mbuf *m, *mb, *prev;
1652 vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
1653 int error, length, i, needed;
1654 ssize_t total;
1655 int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
1656 VM_ALLOC_WIRED;
1657
1658 MPASS((flags & M_PKTHDR) == 0);
1659
1660 /*
1661 * len can be zero or an arbitrarily large value bound by
1662 * the total data supplied by the uio.
1663 */
1664 if (len > 0)
1665 total = MIN(uio->uio_resid, len);
1666 else
1667 total = uio->uio_resid;
1668
1669 if (maxseg == 0)
1670 maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;
1671
1672 /*
1673 * If total is zero, return an empty mbuf. This can occur
1674 * for TLS 1.0 connections which send empty fragments as
1675 * a countermeasure against the known-IV weakness in CBC
1676 * ciphersuites.
1677 */
1678 if (__predict_false(total == 0)) {
1679 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1680 if (mb == NULL)
1681 return (NULL);
1682 mb->m_epg_flags = EPG_FLAG_ANON;
1683 return (mb);
1684 }
1685
1686 /*
1687 * Allocate the pages
1688 */
1689 m = NULL;
1690 while (total > 0) {
1691 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1692 if (mb == NULL)
1693 goto failed;
1694 if (m == NULL)
1695 m = mb;
1696 else
1697 prev->m_next = mb;
1698 prev = mb;
1699 mb->m_epg_flags = EPG_FLAG_ANON;
1700 needed = length = MIN(maxseg, total);
1701 for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
1702 retry_page:
1703 pg_array[i] = vm_page_alloc(NULL, 0, pflags);
1704 if (pg_array[i] == NULL) {
1705 if (how & M_NOWAIT) {
1706 goto failed;
1707 } else {
1708 vm_wait(NULL);
1709 goto retry_page;
1710 }
1711 }
1712 pg_array[i]->flags &= ~PG_ZERO;
1713 mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
1714 mb->m_epg_npgs++;
1715 }
1716 mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
1717 MBUF_EXT_PGS_ASSERT_SANITY(mb);
1718 total -= length;
1719 error = uiomove_fromphys(pg_array, 0, length, uio);
1720 if (error != 0)
1721 goto failed;
1722 mb->m_len = length;
1723 mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
1724 if (flags & M_PKTHDR)
1725 m->m_pkthdr.len += length;
1726 }
1727 return (m);
1728
1729 failed:
1730 m_freem(m);
1731 return (NULL);
1732 }
1733
1734 /*
1735 * Copy the contents of uio into a properly sized mbuf chain.
1736 */
1737 struct mbuf *
1738 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1739 {
1740 struct mbuf *m, *mb;
1741 int error, length;
1742 ssize_t total;
1743 int progress = 0;
1744
1745 if (flags & M_EXTPG)
1746 return (m_uiotombuf_nomap(uio, how, len, align, flags));
1747
1748 /*
1749 * len can be zero or an arbitrarily large value bound by
1750 * the total data supplied by the uio.
1751 */
1752 if (len > 0)
1753 total = (uio->uio_resid < len) ? uio->uio_resid : len;
1754 else
1755 total = uio->uio_resid;
1756
1757 /*
1758 * The smallest unit returned by m_getm2() is a single mbuf
1759 * with pkthdr. We can't align past it.
1760 */
1761 if (align >= MHLEN)
1762 return (NULL);
1763
1764 /*
1765 * Give us the full allocation or nothing.
1766 * If len is zero return the smallest empty mbuf.
1767 */
1768 m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1769 if (m == NULL)
1770 return (NULL);
1771 m->m_data += align;
1772
1773 /* Fill all mbufs with uio data and update header information. */
1774 for (mb = m; mb != NULL; mb = mb->m_next) {
1775 length = min(M_TRAILINGSPACE(mb), total - progress);
1776
1777 error = uiomove(mtod(mb, void *), length, uio);
1778 if (error) {
1779 m_freem(m);
1780 return (NULL);
1781 }
1782
1783 mb->m_len = length;
1784 progress += length;
1785 if (flags & M_PKTHDR)
1786 m->m_pkthdr.len += length;
1787 }
1788 KASSERT(progress == total, ("%s: progress != total", __func__));
1789
1790 return (m);
1791 }
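/*
 * Illustrative sketch (socket-send style caller): copy at most 'space'
 * bytes of user data into a fresh packet, keeping room for protocol
 * headers in front:
 *
 *	m = m_uiotombuf(uio, M_WAITOK, space, max_hdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */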
1792
1793 /*
1794 * Copy data from an unmapped mbuf into a uio limited by len if set.
1795 */
1796 int
1797 m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
1798 {
1799 vm_page_t pg;
1800 int error, i, off, pglen, pgoff, seglen, segoff;
1801
1802 M_ASSERTEXTPG(m);
1803 error = 0;
1804
1805 /* Skip over any data removed from the front. */
1806 off = mtod(m, vm_offset_t);
1807
1808 off += m_off;
1809 if (m->m_epg_hdrlen != 0) {
1810 if (off >= m->m_epg_hdrlen) {
1811 off -= m->m_epg_hdrlen;
1812 } else {
1813 seglen = m->m_epg_hdrlen - off;
1814 segoff = off;
1815 seglen = min(seglen, len);
1816 off = 0;
1817 len -= seglen;
1818 error = uiomove(__DECONST(void *,
1819 &m->m_epg_hdr[segoff]), seglen, uio);
1820 }
1821 }
1822 pgoff = m->m_epg_1st_off;
1823 for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
1824 pglen = m_epg_pagelen(m, i, pgoff);
1825 if (off >= pglen) {
1826 off -= pglen;
1827 pgoff = 0;
1828 continue;
1829 }
1830 seglen = pglen - off;
1831 segoff = pgoff + off;
1832 off = 0;
1833 seglen = min(seglen, len);
1834 len -= seglen;
1835 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1836 error = uiomove_fromphys(&pg, segoff, seglen, uio);
1837 pgoff = 0;
1838 }
1839 if (len != 0 && error == 0) {
1840 KASSERT((off + len) <= m->m_epg_trllen,
1841 ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
1842 m->m_epg_trllen, m_off));
1843 error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
1844 len, uio);
1845 }
1846 return (error);
1847 }
1848
1849 /*
1850 * Copy an mbuf chain into a uio limited by len if set.
1851 */
1852 int
1853 m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
1854 {
1855 int error, length, total;
1856 int progress = 0;
1857
1858 if (len > 0)
1859 total = min(uio->uio_resid, len);
1860 else
1861 total = uio->uio_resid;
1862
1863 /* Fill the uio with data from the mbufs. */
1864 for (; m != NULL; m = m->m_next) {
1865 length = min(m->m_len, total - progress);
1866
1867 if ((m->m_flags & M_EXTPG) != 0)
1868 error = m_unmappedtouio(m, 0, uio, length);
1869 else
1870 error = uiomove(mtod(m, void *), length, uio);
1871 if (error)
1872 return (error);
1873
1874 progress += length;
1875 }
1876
1877 return (0);
1878 }
1879
1880 /*
1881 * Create a writable copy of the mbuf chain. While doing this
1882 * we compact the chain with a goal of producing a chain with
1883 * at most two mbufs. The second mbuf in this chain is likely
1884 * to be a cluster. The primary purpose of this work is to create
1885 * a writable packet for encryption, compression, etc. The
1886 * secondary goal is to linearize the data so the data can be
1887 * passed to crypto hardware in the most efficient manner possible.
1888 */
1889 struct mbuf *
1890 m_unshare(struct mbuf *m0, int how)
1891 {
1892 struct mbuf *m, *mprev;
1893 struct mbuf *n, *mfirst, *mlast;
1894 int len, off;
1895
1896 mprev = NULL;
1897 for (m = m0; m != NULL; m = mprev->m_next) {
1898 /*
1899 * Regular mbufs are ignored unless there's a cluster
1900 * in front of them that we can use to coalesce. We do
1901 * the latter mainly so later clusters can be coalesced
1902 * also w/o having to handle them specially (i.e. convert
1903 * mbuf+cluster -> cluster). This optimization is heavily
1904 * influenced by the assumption that we're running over
1905 * Ethernet where MCLBYTES is large enough that the max
1906 * packet size will permit lots of coalescing into a
1907 * single cluster. This in turn permits efficient
1908 * crypto operations, especially when using hardware.
1909 */
1910 if ((m->m_flags & M_EXT) == 0) {
1911 if (mprev && (mprev->m_flags & M_EXT) &&
1912 m->m_len <= M_TRAILINGSPACE(mprev)) {
1913 /* XXX: this ignores mbuf types */
1914 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1915 mtod(m, caddr_t), m->m_len);
1916 mprev->m_len += m->m_len;
1917 mprev->m_next = m->m_next; /* unlink from chain */
1918 m_free(m); /* reclaim mbuf */
1919 } else {
1920 mprev = m;
1921 }
1922 continue;
1923 }
1924 /*
1925 * Writable mbufs are left alone (for now).
1926 */
1927 if (M_WRITABLE(m)) {
1928 mprev = m;
1929 continue;
1930 }
1931
1932 /*
1933 * Not writable, replace with a copy or coalesce with
1934 * the previous mbuf if possible (since we have to copy
1935 * it anyway, we try to reduce the number of mbufs and
1936 * clusters so that future work is easier).
1937 */
1938 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1939 /* NB: we only coalesce into a cluster or larger */
1940 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1941 m->m_len <= M_TRAILINGSPACE(mprev)) {
1942 /* XXX: this ignores mbuf types */
1943 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1944 mtod(m, caddr_t), m->m_len);
1945 mprev->m_len += m->m_len;
1946 mprev->m_next = m->m_next; /* unlink from chain */
1947 m_free(m); /* reclaim mbuf */
1948 continue;
1949 }
1950
1951 /*
1952 * Allocate new space to hold the copy and copy the data.
1953 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
1954 * splitting them into clusters. We could just malloc a
1955 * buffer and make it external but too many device drivers
1956 * don't know how to break up the non-contiguous memory when
1957 * doing DMA.
1958 */
1959 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1960 if (n == NULL) {
1961 m_freem(m0);
1962 return (NULL);
1963 }
1964 if (m->m_flags & M_PKTHDR) {
1965 KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
1966 __func__, m0, m));
1967 m_move_pkthdr(n, m);
1968 }
1969 len = m->m_len;
1970 off = 0;
1971 mfirst = n;
1972 mlast = NULL;
1973 for (;;) {
1974 int cc = min(len, MCLBYTES);
1975 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1976 n->m_len = cc;
1977 if (mlast != NULL)
1978 mlast->m_next = n;
1979 mlast = n;
1980 #if 0
1981 newipsecstat.ips_clcopied++;
1982 #endif
1983
1984 len -= cc;
1985 if (len <= 0)
1986 break;
1987 off += cc;
1988
1989 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1990 if (n == NULL) {
1991 m_freem(mfirst);
1992 m_freem(m0);
1993 return (NULL);
1994 }
1995 }
1996 n->m_next = m->m_next;
1997 if (mprev == NULL)
1998 m0 = mfirst; /* new head of chain */
1999 else
2000 mprev->m_next = mfirst; /* replace old mbuf */
2001 m_free(m); /* release old mbuf */
2002 mprev = mfirst;
2003 }
2004 return (m0);
2005 }
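/*
 * Illustrative sketch (hypothetical caller, e.g. before in-place
 * encryption): obtain a chain that is safe to write to:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the original chain was freed)
 */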
2006
2007 #ifdef MBUF_PROFILING
2008
2009 #define MP_BUCKETS 32 /* don't just change this as things may overflow. */
2010 struct mbufprofile {
2011 uintmax_t wasted[MP_BUCKETS];
2012 uintmax_t used[MP_BUCKETS];
2013 uintmax_t segments[MP_BUCKETS];
2014 } mbprof;
2015
2016 #define MP_MAXDIGITS 21 /* strlen("18446744073709551615") == 20, +1 spare */
2017 #define MP_NUMLINES 6
2018 #define MP_NUMSPERLINE 16
2019 #define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */
2020 /* work out max space needed and add a bit of spare space too */
2021 #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
2022 #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)
2023
2024 char mbprofbuf[MP_BUFSIZE];
2025
2026 void
2027 m_profile(struct mbuf *m)
2028 {
2029 int segments = 0;
2030 int used = 0;
2031 int wasted = 0;
2032
2033 while (m) {
2034 segments++;
2035 used += m->m_len;
2036 if (m->m_flags & M_EXT) {
2037 wasted += MHLEN - sizeof(m->m_ext) +
2038 m->m_ext.ext_size - m->m_len;
2039 } else {
2040 if (m->m_flags & M_PKTHDR)
2041 wasted += MHLEN - m->m_len;
2042 else
2043 wasted += MLEN - m->m_len;
2044 }
2045 m = m->m_next;
2046 }
2047 /* be paranoid.. it helps */
2048 if (segments > MP_BUCKETS - 1)
2049 segments = MP_BUCKETS - 1;
2050 if (used > 100000)
2051 used = 100000;
2052 if (wasted > 100000)
2053 wasted = 100000;
2054 /* store in the appropriate bucket */
2055 /* don't bother locking. if it's slightly off, so what? */
2056 mbprof.segments[segments]++;
2057 mbprof.used[fls(used)]++;
2058 mbprof.wasted[fls(wasted)]++;
2059 }
2060
2061 static void
2062 mbprof_textify(void)
2063 {
2064 int offset;
2065 char *c;
2066 uint64_t *p;
2067
2068 p = &mbprof.wasted[0];
2069 c = mbprofbuf;
2070 offset = snprintf(c, MP_MAXLINE + 10,
2071 "wasted:\n"
2072 "%ju %ju %ju %ju %ju %ju %ju %ju "
2073 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2074 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2075 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2076 #ifdef BIG_ARRAY
2077 p = &mbprof.wasted[16];
2078 c += offset;
2079 offset = snprintf(c, MP_MAXLINE,
2080 "%ju %ju %ju %ju %ju %ju %ju %ju "
2081 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2082 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2083 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2084 #endif
2085 p = &mbprof.used[0];
2086 c += offset;
2087 offset = snprintf(c, MP_MAXLINE + 10,
2088 "used:\n"
2089 "%ju %ju %ju %ju %ju %ju %ju %ju "
2090 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2091 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2092 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2093 #ifdef BIG_ARRAY
2094 p = &mbprof.used[16];
2095 c += offset;
2096 offset = snprintf(c, MP_MAXLINE,
2097 "%ju %ju %ju %ju %ju %ju %ju %ju "
2098 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2099 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2100 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2101 #endif
2102 p = &mbprof.segments[0];
2103 c += offset;
2104 offset = snprintf(c, MP_MAXLINE + 10,
2105 "segments:\n"
2106 "%ju %ju %ju %ju %ju %ju %ju %ju "
2107 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2108 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2109 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2110 #ifdef BIG_ARRAY
2111 p = &mbprof.segments[16];
2112 c += offset;
2113 offset = snprintf(c, MP_MAXLINE,
2114 "%ju %ju %ju %ju %ju %ju %ju %ju "
2115 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2116 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2117 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2118 #endif
2119 }
2120
2121 static int
2122 mbprof_handler(SYSCTL_HANDLER_ARGS)
2123 {
2124 int error;
2125
2126 mbprof_textify();
2127 error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
2128 return (error);
2129 }
2130
2131 static int
2132 mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
2133 {
2134 int clear, error;
2135
2136 clear = 0;
2137 error = sysctl_handle_int(oidp, &clear, 0, req);
2138 if (error || !req->newptr)
2139 return (error);
2140
2141 if (clear) {
2142 bzero(&mbprof, sizeof(mbprof));
2143 }
2144
2145 return (error);
2146 }
2147
2148 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
2149 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
2150 mbprof_handler, "A",
2151 "mbuf profiling statistics");
2152
2153 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
2154 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
2155 mbprof_clr_handler, "I",
2156 "clear mbuf profiling statistics");
2157 #endif