FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_param.h"
38 #include "opt_mbuf_stress_test.h"
39 #include "opt_mbuf_profiling.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sysctl.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/uio.h>
52 #include <sys/vmmeter.h>
53 #include <sys/sdt.h>
54 #include <vm/vm.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/vm_page.h>
57
58 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
59 "struct mbuf *", "mbufinfo_t *",
60 "uint32_t", "uint32_t",
61 "uint16_t", "uint16_t",
62 "uint32_t", "uint32_t",
63 "uint32_t", "uint32_t");
64
65 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
66 "uint32_t", "uint32_t",
67 "uint16_t", "uint16_t",
68 "struct mbuf *", "mbufinfo_t *");
69
70 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
71 "uint32_t", "uint32_t",
72 "uint16_t", "uint16_t",
73 "struct mbuf *", "mbufinfo_t *");
74
75 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
76 "uint32_t", "uint32_t",
77 "uint16_t", "uint16_t",
78 "uint32_t", "uint32_t",
79 "struct mbuf *", "mbufinfo_t *");
80
81 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
82 "uint32_t", "uint32_t",
83 "uint16_t", "uint16_t",
84 "uint32_t", "uint32_t",
85 "uint32_t", "uint32_t",
86 "struct mbuf *", "mbufinfo_t *");
87
88 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
89 "struct mbuf *", "mbufinfo_t *",
90 "uint32_t", "uint32_t",
91 "uint32_t", "uint32_t");
92
93 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
94 "struct mbuf *", "mbufinfo_t *",
95 "uint32_t", "uint32_t",
96 "uint32_t", "uint32_t",
97 "void*", "void*");
98
99 SDT_PROBE_DEFINE(sdt, , , m__cljset);
100
101 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
102 "struct mbuf *", "mbufinfo_t *");
103
104 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
105 "struct mbuf *", "mbufinfo_t *");
106
107 #include <security/mac/mac_framework.h>
108
109 int max_linkhdr;
110 int max_protohdr;
111 int max_hdr;
112 int max_datalen;
113 #ifdef MBUF_STRESS_TEST
114 int m_defragpackets;
115 int m_defragbytes;
116 int m_defraguseless;
117 int m_defragfailure;
118 int m_defragrandomfailures;
119 #endif
120
121 /*
122 * sysctl(8) exported objects
123 */
124 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
125 &max_linkhdr, 0, "Size of largest link layer header");
126 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
127 &max_protohdr, 0, "Size of largest protocol layer header");
128 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
129 &max_hdr, 0, "Size of largest link plus protocol header");
130 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
131 &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
132 #ifdef MBUF_STRESS_TEST
133 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
134 &m_defragpackets, 0, "");
135 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
136 &m_defragbytes, 0, "");
137 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
138 &m_defraguseless, 0, "");
139 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
140 &m_defragfailure, 0, "");
141 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
142 &m_defragrandomfailures, 0, "");
143 #endif
144
145 /*
146 * Ensure the correct size of various mbuf parameters. It could be off due
147 * to compiler-induced padding and alignment artifacts.
148 */
149 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
150 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
151
152 /*
153 * mbuf data storage should be 64-bit aligned regardless of architectural
154 * pointer size; check this is the case with and without a packet header.
155 */
156 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
157 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
158
159 /*
160 * While the specific values here don't matter too much (i.e., +/- a few
161 * words), we do want to ensure that changes to these values are carefully
162 * reasoned about and properly documented. This is especially the case as
163 * network-protocol and device-driver modules encode these layouts, and must
164 * be recompiled if the structures change. Check these values at compile time
165 * against the ones documented in comments in mbuf.h.
166 *
167 * NB: Possibly they should be documented there via #define's and not just
168 * comments.
169 */
170 #if defined(__LP64__)
171 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
172 CTASSERT(sizeof(struct pkthdr) == 56);
173 CTASSERT(sizeof(struct m_ext) == 160);
174 #else
175 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
176 CTASSERT(sizeof(struct pkthdr) == 48);
177 #if defined(__powerpc__) && defined(BOOKE)
178 /* PowerPC booke has 64-bit physical pointers. */
179 CTASSERT(sizeof(struct m_ext) == 184);
180 #else
181 CTASSERT(sizeof(struct m_ext) == 180);
182 #endif
183 #endif
184
185 /*
186 * Assert that the queue(3) macros produce code of the same size as an old
187 * plain pointer does.
188 */
189 #ifdef INVARIANTS
190 static struct mbuf __used m_assertbuf;
191 CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
192 CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
193 CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
194 CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
195 #endif
196
197 /*
198 * Attach the cluster from *m to *n, set up m_ext in *n
199 * and bump the refcount of the cluster.
200 */
201 void
202 mb_dupcl(struct mbuf *n, struct mbuf *m)
203 {
204 volatile u_int *refcnt;
205
206 KASSERT(m->m_flags & (M_EXT|M_EXTPG),
207 ("%s: M_EXT|M_EXTPG not set on %p", __func__, m));
208 KASSERT(!(n->m_flags & (M_EXT|M_EXTPG)),
209 ("%s: M_EXT|M_EXTPG set on %p", __func__, n));
210
211 /*
212 * Cache access optimization.
213 *
214 * o Regular M_EXT storage doesn't need full copy of m_ext, since
215 * the holder of the 'ext_count' is responsible to carry the free
216 * routine and its arguments.
217 * o M_EXTPG data is split between main part of mbuf and m_ext, the
218 * main part is copied in full, the m_ext part is similar to M_EXT.
219 * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is
220 * special - it needs full copy of m_ext into each mbuf, since any
221 * copy could end up as the last to free.
222 */
223 if (m->m_flags & M_EXTPG) {
224 bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
225 __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
226 bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
227 } else if (m->m_ext.ext_type == EXT_EXTREF)
228 bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
229 else
230 bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
231
232 n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);
233
234 /* See if this is the mbuf that holds the embedded refcount. */
235 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
236 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
237 n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
238 } else {
239 KASSERT(m->m_ext.ext_cnt != NULL,
240 ("%s: no refcounting pointer on %p", __func__, m));
241 refcnt = m->m_ext.ext_cnt;
242 }
243
244 if (*refcnt == 1)
245 *refcnt += 1;
246 else
247 atomic_add_int(refcnt, 1);
248 }
249
250 void
251 m_demote_pkthdr(struct mbuf *m)
252 {
253
254 M_ASSERTPKTHDR(m);
255
256 m_tag_delete_chain(m, NULL);
257 m->m_flags &= ~M_PKTHDR;
258 bzero(&m->m_pkthdr, sizeof(struct pkthdr));
259 }
260
261 /*
262 * Clean up mbuf (chain) from any tags and packet headers.
263 * If "all" is set then the first mbuf in the chain will be
264 * cleaned too.
265 */
266 void
267 m_demote(struct mbuf *m0, int all, int flags)
268 {
269 struct mbuf *m;
270
271 flags |= M_DEMOTEFLAGS;
272
273 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
274 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
275 __func__, m, m0));
276 if (m->m_flags & M_PKTHDR)
277 m_demote_pkthdr(m);
278 m->m_flags &= flags;
279 }
280 }
281
282 /*
283 * Sanity checks on mbuf (chain) for use in KASSERT() and general
284 * debugging.
285 * Returns 1 if all tests pass; when something is bad it panics
286 * (under INVARIANTS) or prints a diagnostic. The sanitize argument:
287 * 0 to run M_SANITY_ACTION, 1 to garble things so they blow up later.
288 */
289 int
290 m_sanity(struct mbuf *m0, int sanitize)
291 {
292 struct mbuf *m;
293 caddr_t a, b;
294 int pktlen = 0;
295
296 #ifdef INVARIANTS
297 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
298 #else
299 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
300 #endif
301
302 for (m = m0; m != NULL; m = m->m_next) {
303 /*
304 * Basic pointer checks. If any of these fails then some
305 * unrelated kernel memory before or after us is trashed.
306 * No way to recover from that.
307 */
308 a = M_START(m);
309 b = a + M_SIZE(m);
310 if ((caddr_t)m->m_data < a)
311 M_SANITY_ACTION("m_data outside mbuf data range left");
312 if ((caddr_t)m->m_data > b)
313 M_SANITY_ACTION("m_data outside mbuf data range right");
314 if ((caddr_t)m->m_data + m->m_len > b)
315 M_SANITY_ACTION("m_data + m_len exeeds mbuf space");
316
317 /* m->m_nextpkt may only be set on first mbuf in chain. */
318 if (m != m0 && m->m_nextpkt != NULL) {
319 if (sanitize) {
320 m_freem(m->m_nextpkt);
321 m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
322 } else
323 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
324 }
325
326 /* packet length (not mbuf length!) calculation */
327 if (m0->m_flags & M_PKTHDR)
328 pktlen += m->m_len;
329
330 /* m_tags may only be attached to first mbuf in chain. */
331 if (m != m0 && m->m_flags & M_PKTHDR &&
332 !SLIST_EMPTY(&m->m_pkthdr.tags)) {
333 if (sanitize) {
334 m_tag_delete_chain(m, NULL);
335 /* put in 0xDEADC0DE perhaps? */
336 } else
337 M_SANITY_ACTION("m_tags on in-chain mbuf");
338 }
339
340 /* M_PKTHDR may only be set on first mbuf in chain */
341 if (m != m0 && m->m_flags & M_PKTHDR) {
342 if (sanitize) {
343 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
344 m->m_flags &= ~M_PKTHDR;
345 /* put in 0xDEADC0DE and leave hdr flag in */
346 } else
347 M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
348 }
349 }
350 m = m0;
351 if (pktlen && pktlen != m->m_pkthdr.len) {
352 if (sanitize)
353 m->m_pkthdr.len = 0;
354 else
355 M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
356 }
357 return 1;
358
359 #undef M_SANITY_ACTION
360 }
361
362 /*
363 * Non-inlined part of m_init().
364 */
365 int
366 m_pkthdr_init(struct mbuf *m, int how)
367 {
368 #ifdef MAC
369 int error;
370 #endif
371 m->m_data = m->m_pktdat;
372 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
373 #ifdef NUMA
374 m->m_pkthdr.numa_domain = M_NODOM;
375 #endif
376 #ifdef MAC
377 /* If the label init fails, fail the alloc */
378 error = mac_mbuf_init(m, how);
379 if (error)
380 return (error);
381 #endif
382
383 return (0);
384 }
385
386 /*
387 * "Move" mbuf pkthdr from "from" to "to".
388 * "from" must have M_PKTHDR set, and "to" must be empty.
389 */
390 void
391 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
392 {
393
394 #if 0
395 /* see below for why these are not enabled */
396 M_ASSERTPKTHDR(to);
397 /* Note: with MAC, this may not be a good assertion. */
398 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
399 ("m_move_pkthdr: to has tags"));
400 #endif
401 #ifdef MAC
402 /*
403 * XXXMAC: It could be this should also occur for non-MAC?
404 */
405 if (to->m_flags & M_PKTHDR)
406 m_tag_delete_chain(to, NULL);
407 #endif
408 to->m_flags = (from->m_flags & M_COPYFLAGS) |
409 (to->m_flags & (M_EXT | M_EXTPG));
410 if ((to->m_flags & M_EXT) == 0)
411 to->m_data = to->m_pktdat;
412 to->m_pkthdr = from->m_pkthdr; /* especially tags */
413 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
414 from->m_flags &= ~M_PKTHDR;
415 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
416 from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
417 from->m_pkthdr.snd_tag = NULL;
418 }
419 }
420
421 /*
422 * Duplicate "from"'s mbuf pkthdr in "to".
423 * "from" must have M_PKTHDR set, and "to" must be empty.
424 * In particular, this does a deep copy of the packet tags.
425 */
426 int
427 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
428 {
429
430 #if 0
431 /*
432 * The mbuf allocator only initializes the pkthdr
433 * when the mbuf is allocated with m_gethdr(). Many users
434 * (e.g. m_copy*, m_prepend) use m_get() and then
435 * smash the pkthdr as needed causing these
436 * assertions to trip. For now just disable them.
437 */
438 M_ASSERTPKTHDR(to);
439 /* Note: with MAC, this may not be a good assertion. */
440 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
441 #endif
442 MBUF_CHECKSLEEP(how);
443 #ifdef MAC
444 if (to->m_flags & M_PKTHDR)
445 m_tag_delete_chain(to, NULL);
446 #endif
447 to->m_flags = (from->m_flags & M_COPYFLAGS) |
448 (to->m_flags & (M_EXT | M_EXTPG));
449 if ((to->m_flags & M_EXT) == 0)
450 to->m_data = to->m_pktdat;
451 to->m_pkthdr = from->m_pkthdr;
452 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
453 m_snd_tag_ref(from->m_pkthdr.snd_tag);
454 SLIST_INIT(&to->m_pkthdr.tags);
455 return (m_tag_copy_chain(to, from, how));
456 }
457
458 /*
459 * Lesser-used path for M_PREPEND:
460 * allocate new mbuf to prepend to chain,
461 * copy junk along.
462 */
463 struct mbuf *
464 m_prepend(struct mbuf *m, int len, int how)
465 {
466 struct mbuf *mn;
467
468 if (m->m_flags & M_PKTHDR)
469 mn = m_gethdr(how, m->m_type);
470 else
471 mn = m_get(how, m->m_type);
472 if (mn == NULL) {
473 m_freem(m);
474 return (NULL);
475 }
476 if (m->m_flags & M_PKTHDR)
477 m_move_pkthdr(mn, m);
478 mn->m_next = m;
479 m = mn;
480 if (len < M_SIZE(m))
481 M_ALIGN(m, len);
482 m->m_len = len;
483 return (m);
484 }
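In practice callers rarely invoke m_prepend() directly; the M_PREPEND() macro first tries the leading space in the existing mbuf and only falls back to this path. A minimal sketch of prepending an Ethernet header, assuming <net/ethernet.h> and an assumed outbound chain 'm':

	struct ether_header *eh;

	M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);	/* may replace m */
	if (m == NULL)
		return (ENOBUFS);		/* chain already freed */
	eh = mtod(m, struct ether_header *);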
485
486 /*
487 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
488 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
489 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
490 * Note that the copy is read-only, because clusters are not copied,
491 * only their reference counts are incremented.
492 */
493 struct mbuf *
494 m_copym(struct mbuf *m, int off0, int len, int wait)
495 {
496 struct mbuf *n, **np;
497 int off = off0;
498 struct mbuf *top;
499 int copyhdr = 0;
500
501 KASSERT(off >= 0, ("m_copym, negative off %d", off));
502 KASSERT(len >= 0, ("m_copym, negative len %d", len));
503 MBUF_CHECKSLEEP(wait);
504 if (off == 0 && m->m_flags & M_PKTHDR)
505 copyhdr = 1;
506 while (off > 0) {
507 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
508 if (off < m->m_len)
509 break;
510 off -= m->m_len;
511 m = m->m_next;
512 }
513 np = ⊤
514 top = NULL;
515 while (len > 0) {
516 if (m == NULL) {
517 KASSERT(len == M_COPYALL,
518 ("m_copym, length > size of mbuf chain"));
519 break;
520 }
521 if (copyhdr)
522 n = m_gethdr(wait, m->m_type);
523 else
524 n = m_get(wait, m->m_type);
525 *np = n;
526 if (n == NULL)
527 goto nospace;
528 if (copyhdr) {
529 if (!m_dup_pkthdr(n, m, wait))
530 goto nospace;
531 if (len == M_COPYALL)
532 n->m_pkthdr.len -= off0;
533 else
534 n->m_pkthdr.len = len;
535 copyhdr = 0;
536 }
537 n->m_len = min(len, m->m_len - off);
538 if (m->m_flags & (M_EXT|M_EXTPG)) {
539 n->m_data = m->m_data + off;
540 mb_dupcl(n, m);
541 } else
542 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
543 (u_int)n->m_len);
544 if (len != M_COPYALL)
545 len -= n->m_len;
546 off = 0;
547 m = m->m_next;
548 np = &n->m_next;
549 }
550
551 return (top);
552 nospace:
553 m_freem(top);
554 return (NULL);
555 }
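A usage sketch: take a reference-counted, read-only copy of a whole packet, e.g. for a tap or a retransmit queue. 'm' is an assumed packet-header chain:

	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
	if (copy == NULL)
		return (ENOBUFS);	/* original chain is untouched */
	/*
	 * 'copy' shares clusters with 'm'; check M_WRITABLE() before
	 * modifying either chain in place.
	 */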
556
557 /*
558 * Copy an entire packet, including header (which must be present).
559 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
560 * Note that the copy is read-only, because clusters are not copied,
561 * only their reference counts are incremented.
562 * Preserve alignment of the first mbuf so if the creator has left
563 * some room at the beginning (e.g. for inserting protocol headers)
564 * the copies still have the room available.
565 */
566 struct mbuf *
567 m_copypacket(struct mbuf *m, int how)
568 {
569 struct mbuf *top, *n, *o;
570
571 MBUF_CHECKSLEEP(how);
572 n = m_get(how, m->m_type);
573 top = n;
574 if (n == NULL)
575 goto nospace;
576
577 if (!m_dup_pkthdr(n, m, how))
578 goto nospace;
579 n->m_len = m->m_len;
580 if (m->m_flags & (M_EXT|M_EXTPG)) {
581 n->m_data = m->m_data;
582 mb_dupcl(n, m);
583 } else {
584 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
585 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
586 }
587
588 m = m->m_next;
589 while (m) {
590 o = m_get(how, m->m_type);
591 if (o == NULL)
592 goto nospace;
593
594 n->m_next = o;
595 n = n->m_next;
596
597 n->m_len = m->m_len;
598 if (m->m_flags & (M_EXT|M_EXTPG)) {
599 n->m_data = m->m_data;
600 mb_dupcl(n, m);
601 } else {
602 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
603 }
604
605 m = m->m_next;
606 }
607 return top;
608 nospace:
609 m_freem(top);
610 return (NULL);
611 }
612
613 static void
614 m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
615 {
616 struct iovec iov;
617 struct uio uio;
618 int error;
619
620 KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
621 KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
622 KASSERT(off < m->m_len,
623 ("m_copyfromunmapped: len exceeds mbuf length"));
624 iov.iov_base = cp;
625 iov.iov_len = len;
626 uio.uio_resid = len;
627 uio.uio_iov = &iov;
628 uio.uio_segflg = UIO_SYSSPACE;
629 uio.uio_iovcnt = 1;
630 uio.uio_offset = 0;
631 uio.uio_rw = UIO_READ;
632 error = m_unmapped_uiomove(m, off, &uio, len);
633 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
634 len));
635 }
636
637 /*
638 * Copy data from an mbuf chain starting "off" bytes from the beginning,
639 * continuing for "len" bytes, into the indicated buffer.
640 */
641 void
642 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
643 {
644 u_int count;
645
646 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
647 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
648 while (off > 0) {
649 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
650 if (off < m->m_len)
651 break;
652 off -= m->m_len;
653 m = m->m_next;
654 }
655 while (len > 0) {
656 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
657 count = min(m->m_len - off, len);
658 if ((m->m_flags & M_EXTPG) != 0)
659 m_copyfromunmapped(m, off, count, cp);
660 else
661 bcopy(mtod(m, caddr_t) + off, cp, count);
662 len -= count;
663 cp += count;
664 off = 0;
665 m = m->m_next;
666 }
667 }
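A sketch of the common pattern: pull a fixed-size header out of a possibly fragmented chain into a stack buffer, so the caller need not care where mbuf boundaries fall. 'iphlen' is an assumed IP header length:

	struct tcphdr th;

	m_copydata(m, iphlen, sizeof(th), (caddr_t)&th);
	/* th now holds a contiguous copy regardless of fragmentation */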
668
669 /*
670 * Copy a packet header mbuf chain into a completely new chain, including
671 * copying any mbuf clusters. Use this instead of m_copypacket() when
672 * you need a writable copy of an mbuf chain.
673 */
674 struct mbuf *
675 m_dup(const struct mbuf *m, int how)
676 {
677 struct mbuf **p, *top = NULL;
678 int remain, moff, nsize;
679
680 MBUF_CHECKSLEEP(how);
681 /* Sanity check */
682 if (m == NULL)
683 return (NULL);
684 M_ASSERTPKTHDR(m);
685
686 /* While there's more data, get a new mbuf, tack it on, and fill it */
687 remain = m->m_pkthdr.len;
688 moff = 0;
689 p = ⊤
690 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
691 struct mbuf *n;
692
693 /* Get the next new mbuf */
694 if (remain >= MINCLSIZE) {
695 n = m_getcl(how, m->m_type, 0);
696 nsize = MCLBYTES;
697 } else {
698 n = m_get(how, m->m_type);
699 nsize = MLEN;
700 }
701 if (n == NULL)
702 goto nospace;
703
704 if (top == NULL) { /* First one, must be PKTHDR */
705 if (!m_dup_pkthdr(n, m, how)) {
706 m_free(n);
707 goto nospace;
708 }
709 if ((n->m_flags & M_EXT) == 0)
710 nsize = MHLEN;
711 n->m_flags &= ~M_RDONLY;
712 }
713 n->m_len = 0;
714
715 /* Link it into the new chain */
716 *p = n;
717 p = &n->m_next;
718
719 /* Copy data from original mbuf(s) into new mbuf */
720 while (n->m_len < nsize && m != NULL) {
721 int chunk = min(nsize - n->m_len, m->m_len - moff);
722
723 m_copydata(m, moff, chunk, n->m_data + n->m_len);
724 moff += chunk;
725 n->m_len += chunk;
726 remain -= chunk;
727 if (moff == m->m_len) {
728 m = m->m_next;
729 moff = 0;
730 }
731 }
732
733 /* Check correct total mbuf length */
734 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
735 ("%s: bogus m_pkthdr.len", __func__));
736 }
737 return (top);
738
739 nospace:
740 m_freem(top);
741 return (NULL);
742 }
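Unlike m_copypacket(), the copy made here owns its storage and is always writable. A sketch, for when the payload must later be modified in place:

	struct mbuf *w;

	w = m_dup(m, M_NOWAIT);		/* deep copy, clusters included */
	if (w == NULL)
		return (ENOBUFS);	/* 'm' is left intact */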
743
744 /*
745 * Concatenate mbuf chain n to m.
746 * Both chains must be of the same type (e.g. MT_DATA).
747 * Any m_pkthdr is not updated.
748 */
749 void
750 m_cat(struct mbuf *m, struct mbuf *n)
751 {
752 while (m->m_next)
753 m = m->m_next;
754 while (n) {
755 if (!M_WRITABLE(m) ||
756 (n->m_flags & M_EXTPG) != 0 ||
757 M_TRAILINGSPACE(m) < n->m_len) {
758 /* just join the two chains */
759 m->m_next = n;
760 return;
761 }
762 /* splat the data from one into the other */
763 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
764 (u_int)n->m_len);
765 m->m_len += n->m_len;
766 n = m_free(n);
767 }
768 }
769
770 /*
771 * Concatenate two pkthdr mbuf chains.
772 */
773 void
774 m_catpkt(struct mbuf *m, struct mbuf *n)
775 {
776
777 M_ASSERTPKTHDR(m);
778 M_ASSERTPKTHDR(n);
779
780 m->m_pkthdr.len += n->m_pkthdr.len;
781 m_demote(n, 1, 0);
782
783 m_cat(m, n);
784 }
785
786 void
787 m_adj(struct mbuf *mp, int req_len)
788 {
789 int len = req_len;
790 struct mbuf *m;
791 int count;
792
793 if ((m = mp) == NULL)
794 return;
795 if (len >= 0) {
796 /*
797 * Trim from head.
798 */
799 while (m != NULL && len > 0) {
800 if (m->m_len <= len) {
801 len -= m->m_len;
802 m->m_len = 0;
803 m = m->m_next;
804 } else {
805 m->m_len -= len;
806 m->m_data += len;
807 len = 0;
808 }
809 }
810 if (mp->m_flags & M_PKTHDR)
811 mp->m_pkthdr.len -= (req_len - len);
812 } else {
813 /*
814 * Trim from tail. Scan the mbuf chain,
815 * calculating its length and finding the last mbuf.
816 * If the adjustment only affects this mbuf, then just
817 * adjust and return. Otherwise, rescan and truncate
818 * after the remaining size.
819 */
820 len = -len;
821 count = 0;
822 for (;;) {
823 count += m->m_len;
824 if (m->m_next == (struct mbuf *)0)
825 break;
826 m = m->m_next;
827 }
828 if (m->m_len >= len) {
829 m->m_len -= len;
830 if (mp->m_flags & M_PKTHDR)
831 mp->m_pkthdr.len -= len;
832 return;
833 }
834 count -= len;
835 if (count < 0)
836 count = 0;
837 /*
838 * Correct length for chain is "count".
839 * Find the mbuf with last data, adjust its length,
840 * and toss data from remaining mbufs on chain.
841 */
842 m = mp;
843 if (m->m_flags & M_PKTHDR)
844 m->m_pkthdr.len = count;
845 for (; m; m = m->m_next) {
846 if (m->m_len >= count) {
847 m->m_len = count;
848 if (m->m_next != NULL) {
849 m_freem(m->m_next);
850 m->m_next = NULL;
851 }
852 break;
853 }
854 count -= m->m_len;
855 }
856 }
857 }
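A sketch of the two trimming directions, assuming <net/ethernet.h> for the length constants: a positive length trims from the head of the chain, a negative one from the tail:

	m_adj(m, ETHER_HDR_LEN);	/* strip 14-byte header from front */
	m_adj(m, -ETHER_CRC_LEN);	/* strip 4-byte FCS from the tail */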
858
859 void
860 m_adj_decap(struct mbuf *mp, int len)
861 {
862 uint8_t rsstype;
863
864 m_adj(mp, len);
865 if ((mp->m_flags & M_PKTHDR) != 0) {
866 /*
867 * If flowid was calculated by card from the inner
868 * headers, move flowid to the decapsulated mbuf
869 * chain, otherwise clear. This depends on the
870 * internals of m_adj, which keeps pkthdr as is, in
871 * particular not changing rsstype and flowid.
872 */
873 rsstype = mp->m_pkthdr.rsstype;
874 if ((rsstype & M_HASHTYPE_INNER) != 0) {
875 M_HASHTYPE_SET(mp, rsstype & ~M_HASHTYPE_INNER);
876 } else {
877 M_HASHTYPE_CLEAR(mp);
878 }
879 }
880 }
881
882 /*
883 * Rearrange an mbuf chain so that len bytes are contiguous
884 * and in the data area of an mbuf (so that mtod will work
885 * for a structure of size len). Returns the resulting
886 * mbuf chain on success, frees it and returns NULL on failure.
887 * If there is room, it will add up to max_protohdr-len extra bytes to the
888 * contiguous region in an attempt to avoid being called next time.
889 */
890 struct mbuf *
891 m_pullup(struct mbuf *n, int len)
892 {
893 struct mbuf *m;
894 int count;
895 int space;
896
897 KASSERT((n->m_flags & M_EXTPG) == 0,
898 ("%s: unmapped mbuf %p", __func__, n));
899
900 /*
901 * If first mbuf has no cluster, and has room for len bytes
902 * without shifting current data, pullup into it,
903 * otherwise allocate a new mbuf to prepend to the chain.
904 */
905 if ((n->m_flags & M_EXT) == 0 &&
906 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
907 if (n->m_len >= len)
908 return (n);
909 m = n;
910 n = n->m_next;
911 len -= m->m_len;
912 } else {
913 if (len > MHLEN)
914 goto bad;
915 m = m_get(M_NOWAIT, n->m_type);
916 if (m == NULL)
917 goto bad;
918 if (n->m_flags & M_PKTHDR)
919 m_move_pkthdr(m, n);
920 }
921 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
922 do {
923 count = min(min(max(len, max_protohdr), space), n->m_len);
924 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
925 (u_int)count);
926 len -= count;
927 m->m_len += count;
928 n->m_len -= count;
929 space -= count;
930 if (n->m_len)
931 n->m_data += count;
932 else
933 n = m_free(n);
934 } while (len > 0 && n);
935 if (len > 0) {
936 (void) m_free(m);
937 goto bad;
938 }
939 m->m_next = n;
940 return (m);
941 bad:
942 m_freem(n);
943 return (NULL);
944 }
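The canonical caller is a protocol input path making a header contiguous before casting. A sketch, assuming <netinet/ip.h> and a chain 'm' fresh from a driver:

	struct ip *ip;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;			/* m_pullup() freed the chain */
	ip = mtod(m, struct ip *);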
945
946 /*
947 * Like m_pullup(), except a new mbuf is always allocated, and we allow
948 * the amount of empty space before the data in the new mbuf to be specified
949 * (in the event that the caller expects to prepend later).
950 */
951 struct mbuf *
952 m_copyup(struct mbuf *n, int len, int dstoff)
953 {
954 struct mbuf *m;
955 int count, space;
956
957 if (len > (MHLEN - dstoff))
958 goto bad;
959 m = m_get(M_NOWAIT, n->m_type);
960 if (m == NULL)
961 goto bad;
962 if (n->m_flags & M_PKTHDR)
963 m_move_pkthdr(m, n);
964 m->m_data += dstoff;
965 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
966 do {
967 count = min(min(max(len, max_protohdr), space), n->m_len);
968 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
969 (unsigned)count);
970 len -= count;
971 m->m_len += count;
972 n->m_len -= count;
973 space -= count;
974 if (n->m_len)
975 n->m_data += count;
976 else
977 n = m_free(n);
978 } while (len > 0 && n);
979 if (len > 0) {
980 (void) m_free(m);
981 goto bad;
982 }
983 m->m_next = n;
984 return (m);
985 bad:
986 m_freem(n);
987 return (NULL);
988 }
989
990 /*
991 * Partition an mbuf chain in two pieces, returning the tail --
992 * all but the first len0 bytes. In case of failure, it returns NULL and
993 * attempts to restore the chain to its original state.
994 *
995 * Note that the resulting mbufs might be read-only, because the new
996 * mbuf can end up sharing an mbuf cluster with the original mbuf if
997 * the "breaking point" happens to lie within a cluster mbuf. Use the
998 * M_WRITABLE() macro to check for this case.
999 */
1000 struct mbuf *
1001 m_split(struct mbuf *m0, int len0, int wait)
1002 {
1003 struct mbuf *m, *n;
1004 u_int len = len0, remain;
1005
1006 MBUF_CHECKSLEEP(wait);
1007 for (m = m0; m && len > m->m_len; m = m->m_next)
1008 len -= m->m_len;
1009 if (m == NULL)
1010 return (NULL);
1011 remain = m->m_len - len;
1012 if (m0->m_flags & M_PKTHDR && remain == 0) {
1013 n = m_gethdr(wait, m0->m_type);
1014 if (n == NULL)
1015 return (NULL);
1016 n->m_next = m->m_next;
1017 m->m_next = NULL;
1018 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1019 n->m_pkthdr.snd_tag =
1020 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1021 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1022 } else
1023 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1024 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1025 m0->m_pkthdr.len = len0;
1026 return (n);
1027 } else if (m0->m_flags & M_PKTHDR) {
1028 n = m_gethdr(wait, m0->m_type);
1029 if (n == NULL)
1030 return (NULL);
1031 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1032 n->m_pkthdr.snd_tag =
1033 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1034 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1035 } else
1036 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1037 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1038 m0->m_pkthdr.len = len0;
1039 if (m->m_flags & (M_EXT|M_EXTPG))
1040 goto extpacket;
1041 if (remain > MHLEN) {
1042 /* m can't be the lead packet */
1043 M_ALIGN(n, 0);
1044 n->m_next = m_split(m, len, wait);
1045 if (n->m_next == NULL) {
1046 (void) m_free(n);
1047 return (NULL);
1048 } else {
1049 n->m_len = 0;
1050 return (n);
1051 }
1052 } else
1053 M_ALIGN(n, remain);
1054 } else if (remain == 0) {
1055 n = m->m_next;
1056 m->m_next = NULL;
1057 return (n);
1058 } else {
1059 n = m_get(wait, m->m_type);
1060 if (n == NULL)
1061 return (NULL);
1062 M_ALIGN(n, remain);
1063 }
1064 extpacket:
1065 if (m->m_flags & (M_EXT|M_EXTPG)) {
1066 n->m_data = m->m_data + len;
1067 mb_dupcl(n, m);
1068 } else {
1069 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1070 }
1071 n->m_len = remain;
1072 m->m_len = len;
1073 n->m_next = m->m_next;
1074 m->m_next = NULL;
1075 return (n);
1076 }
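A usage sketch: peel a record of 'reclen' bytes (an assumed value) off the front of a chain; on failure the original chain is left as it was:

	struct mbuf *tail;

	tail = m_split(m, reclen, M_NOWAIT);
	if (tail == NULL)
		return (ENOBUFS);	/* m is unchanged */
	/* m now holds the first reclen bytes, tail the remainder */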
1077 /*
1078 * Routine to copy from device local memory into mbufs.
1079 * Note that `off' argument is offset into first mbuf of target chain from
1080 * which to begin copying the data to.
1081 */
1082 struct mbuf *
1083 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1084 void (*copy)(char *from, caddr_t to, u_int len))
1085 {
1086 struct mbuf *m;
1087 struct mbuf *top = NULL, **mp = ⊤
1088 int len;
1089
1090 if (off < 0 || off > MHLEN)
1091 return (NULL);
1092
1093 while (totlen > 0) {
1094 if (top == NULL) { /* First one, must be PKTHDR */
1095 if (totlen + off >= MINCLSIZE) {
1096 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1097 len = MCLBYTES;
1098 } else {
1099 m = m_gethdr(M_NOWAIT, MT_DATA);
1100 len = MHLEN;
1101
1102 /* Place initial small packet/header at end of mbuf */
1103 if (m && totlen + off + max_linkhdr <= MHLEN) {
1104 m->m_data += max_linkhdr;
1105 len -= max_linkhdr;
1106 }
1107 }
1108 if (m == NULL)
1109 return NULL;
1110 m->m_pkthdr.rcvif = ifp;
1111 m->m_pkthdr.len = totlen;
1112 } else {
1113 if (totlen + off >= MINCLSIZE) {
1114 m = m_getcl(M_NOWAIT, MT_DATA, 0);
1115 len = MCLBYTES;
1116 } else {
1117 m = m_get(M_NOWAIT, MT_DATA);
1118 len = MLEN;
1119 }
1120 if (m == NULL) {
1121 m_freem(top);
1122 return NULL;
1123 }
1124 }
1125 if (off) {
1126 m->m_data += off;
1127 len -= off;
1128 off = 0;
1129 }
1130 m->m_len = len = min(totlen, len);
1131 if (copy)
1132 copy(buf, mtod(m, caddr_t), (u_int)len);
1133 else
1134 bcopy(buf, mtod(m, caddr_t), (u_int)len);
1135 buf += len;
1136 *mp = m;
1137 mp = &m->m_next;
1138 totlen -= len;
1139 }
1140 return (top);
1141 }
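A sketch of the legacy driver receive path this serves, with an assumed device buffer 'dev_buf' and interface pointer 'ifp'; a NULL copy callback falls back to plain bcopy():

	struct mbuf *m;

	m = m_devget(dev_buf, totlen, 0, ifp, NULL);
	if (m == NULL)
		return;			/* drop: no mbufs available */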
1142
1143 static void
1144 m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp)
1145 {
1146 struct iovec iov;
1147 struct uio uio;
1148 int error;
1149
1150 KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off));
1151 KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len));
1152 KASSERT(off < m->m_len, ("m_copytounmapped: len exceeds mbuf length"));
1153 iov.iov_base = __DECONST(caddr_t, cp);
1154 iov.iov_len = len;
1155 uio.uio_resid = len;
1156 uio.uio_iov = &iov;
1157 uio.uio_segflg = UIO_SYSSPACE;
1158 uio.uio_iovcnt = 1;
1159 uio.uio_offset = 0;
1160 uio.uio_rw = UIO_WRITE;
1161 error = m_unmapped_uiomove(m, off, &uio, len);
1162 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
1163 len));
1164 }
1165
1166 /*
1167 * Copy data from a buffer back into the indicated mbuf chain,
1168 * starting "off" bytes from the beginning, extending the mbuf
1169 * chain if necessary.
1170 */
1171 void
1172 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1173 {
1174 int mlen;
1175 struct mbuf *m = m0, *n;
1176 int totlen = 0;
1177
1178 if (m0 == NULL)
1179 return;
1180 while (off > (mlen = m->m_len)) {
1181 off -= mlen;
1182 totlen += mlen;
1183 if (m->m_next == NULL) {
1184 n = m_get(M_NOWAIT, m->m_type);
1185 if (n == NULL)
1186 goto out;
1187 bzero(mtod(n, caddr_t), MLEN);
1188 n->m_len = min(MLEN, len + off);
1189 m->m_next = n;
1190 }
1191 m = m->m_next;
1192 }
1193 while (len > 0) {
1194 if (m->m_next == NULL && (len > m->m_len - off)) {
1195 m->m_len += min(len - (m->m_len - off),
1196 M_TRAILINGSPACE(m));
1197 }
1198 mlen = min (m->m_len - off, len);
1199 if ((m->m_flags & M_EXTPG) != 0)
1200 m_copytounmapped(m, off, mlen, cp);
1201 else
1202 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1203 cp += mlen;
1204 len -= mlen;
1205 mlen += off;
1206 off = 0;
1207 totlen += mlen;
1208 if (len == 0)
1209 break;
1210 if (m->m_next == NULL) {
1211 n = m_get(M_NOWAIT, m->m_type);
1212 if (n == NULL)
1213 break;
1214 n->m_len = min(MLEN, len);
1215 m->m_next = n;
1216 }
1217 m = m->m_next;
1218 }
1219 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1220 m->m_pkthdr.len = totlen;
1221 }
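A sketch of a typical use: write a freshly computed checksum back into the packet at an assumed offset 'csum_off', letting m_copyback() extend the chain if it is short:

	uint16_t csum;

	csum = htons(sum);		/* 'sum' computed elsewhere */
	m_copyback(m, csum_off, sizeof(csum), (c_caddr_t)&csum);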
1222
1223 /*
1224 * Append the specified data to the indicated mbuf chain,
1225 * extending the chain if the new data does not fit in
1226 * existing space.
1227 *
1228 * Return 1 if able to complete the job; otherwise 0.
1229 */
1230 int
1231 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1232 {
1233 struct mbuf *m, *n;
1234 int remainder, space;
1235
1236 for (m = m0; m->m_next != NULL; m = m->m_next)
1237 ;
1238 remainder = len;
1239 space = M_TRAILINGSPACE(m);
1240 if (space > 0) {
1241 /*
1242 * Copy into available space.
1243 */
1244 if (space > remainder)
1245 space = remainder;
1246 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1247 m->m_len += space;
1248 cp += space, remainder -= space;
1249 }
1250 while (remainder > 0) {
1251 /*
1252 * Allocate a new mbuf; could check space
1253 * and allocate a cluster instead.
1254 */
1255 n = m_get(M_NOWAIT, m->m_type);
1256 if (n == NULL)
1257 break;
1258 n->m_len = min(MLEN, remainder);
1259 bcopy(cp, mtod(n, caddr_t), n->m_len);
1260 cp += n->m_len, remainder -= n->m_len;
1261 m->m_next = n;
1262 m = n;
1263 }
1264 if (m0->m_flags & M_PKTHDR)
1265 m0->m_pkthdr.len += len - remainder;
1266 return (remainder == 0);
1267 }
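A sketch: append an assumed fixed-size 'trailer' structure; the return value must be checked since allocation can fail partway through:

	if (!m_append(m, sizeof(trailer), (c_caddr_t)&trailer)) {
		m_freem(m);		/* chain may be partially grown */
		return (ENOBUFS);
	}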
1268
1269 static int
1270 m_apply_extpg_one(struct mbuf *m, int off, int len,
1271 int (*f)(void *, void *, u_int), void *arg)
1272 {
1273 void *p;
1274 u_int i, count, pgoff, pglen;
1275 int rval;
1276
1277 KASSERT(PMAP_HAS_DMAP,
1278 ("m_apply_extpg_one does not support unmapped mbufs"));
1279 off += mtod(m, vm_offset_t);
1280 if (off < m->m_epg_hdrlen) {
1281 count = min(m->m_epg_hdrlen - off, len);
1282 rval = f(arg, m->m_epg_hdr + off, count);
1283 if (rval)
1284 return (rval);
1285 len -= count;
1286 off = 0;
1287 } else
1288 off -= m->m_epg_hdrlen;
1289 pgoff = m->m_epg_1st_off;
1290 for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
1291 pglen = m_epg_pagelen(m, i, pgoff);
1292 if (off < pglen) {
1293 count = min(pglen - off, len);
1294 p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff);
1295 rval = f(arg, p, count);
1296 if (rval)
1297 return (rval);
1298 len -= count;
1299 off = 0;
1300 } else
1301 off -= pglen;
1302 pgoff = 0;
1303 }
1304 if (len > 0) {
1305 KASSERT(off < m->m_epg_trllen,
1306 ("m_apply_extpg_one: offset beyond trailer"));
1307 KASSERT(len <= m->m_epg_trllen - off,
1308 ("m_apply_extpg_one: length beyond trailer"));
1309 return (f(arg, m->m_epg_trail + off, len));
1310 }
1311 return (0);
1312 }
1313
1314 /* Apply function f to the data in a single mbuf. */
1315 static int
1316 m_apply_one(struct mbuf *m, int off, int len,
1317 int (*f)(void *, void *, u_int), void *arg)
1318 {
1319 if ((m->m_flags & M_EXTPG) != 0)
1320 return (m_apply_extpg_one(m, off, len, f, arg));
1321 else
1322 return (f(arg, mtod(m, caddr_t) + off, len));
1323 }
1324
1325 /*
1326 * Apply function f to the data in an mbuf chain starting "off" bytes from
1327 * the beginning, continuing for "len" bytes.
1328 */
1329 int
1330 m_apply(struct mbuf *m, int off, int len,
1331 int (*f)(void *, void *, u_int), void *arg)
1332 {
1333 u_int count;
1334 int rval;
1335
1336 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1337 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1338 while (off > 0) {
1339 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1340 if (off < m->m_len)
1341 break;
1342 off -= m->m_len;
1343 m = m->m_next;
1344 }
1345 while (len > 0) {
1346 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1347 count = min(m->m_len - off, len);
1348 rval = m_apply_one(m, off, count, f, arg);
1349 if (rval)
1350 return (rval);
1351 len -= count;
1352 off = 0;
1353 m = m->m_next;
1354 }
1355 return (0);
1356 }
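A sketch of the callback style: fold every byte of a region into an accumulator without copying the data out. The 'xor_fold' callback and its accumulator are assumptions for illustration:

	static int
	xor_fold(void *arg, void *data, u_int len)
	{
		uint8_t *acc = arg, *p = data;

		while (len-- > 0)
			*acc ^= *p++;
		return (0);	/* non-zero would abort the walk early */
	}

	/* ... at the call site: */
	uint8_t acc = 0;
	(void)m_apply(m, off, len, xor_fold, &acc);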
1357
1358 /*
1359 * Return a pointer to mbuf/offset of location in mbuf chain.
1360 */
1361 struct mbuf *
1362 m_getptr(struct mbuf *m, int loc, int *off)
1363 {
1364
1365 while (loc >= 0) {
1366 /* Normal end of search. */
1367 if (m->m_len > loc) {
1368 *off = loc;
1369 return (m);
1370 } else {
1371 loc -= m->m_len;
1372 if (m->m_next == NULL) {
1373 if (loc == 0) {
1374 /* Point at the end of valid data. */
1375 *off = m->m_len;
1376 return (m);
1377 }
1378 return (NULL);
1379 }
1380 m = m->m_next;
1381 }
1382 }
1383 return (NULL);
1384 }
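A sketch: locate the mbuf containing absolute chain offset 'loc' (an assumed value) and the byte's offset within it, e.g. before an in-place edit:

	struct mbuf *n;
	int off;

	n = m_getptr(m, loc, &off);
	if (n == NULL)
		return (EINVAL);	/* loc is beyond the chain */
	/* the byte lives at mtod(n, char *)[off] */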
1385
1386 void
1387 m_print(const struct mbuf *m, int maxlen)
1388 {
1389 int len;
1390 int pdata;
1391 const struct mbuf *m2;
1392
1393 if (m == NULL) {
1394 printf("mbuf: %p\n", m);
1395 return;
1396 }
1397
1398 if (m->m_flags & M_PKTHDR)
1399 len = m->m_pkthdr.len;
1400 else
1401 len = -1;
1402 m2 = m;
1403 while (m2 != NULL && (len == -1 || len)) {
1404 pdata = m2->m_len;
1405 if (maxlen != -1 && pdata > maxlen)
1406 pdata = maxlen;
1407 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1408 m2->m_next, m2->m_flags, "\2\20freelist\17skipfw"
1409 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1410 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1411 if (pdata)
1412 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1413 if (len != -1)
1414 len -= m2->m_len;
1415 m2 = m2->m_next;
1416 }
1417 if (len > 0)
1418 printf("%d bytes unaccounted for.\n", len);
1419 return;
1420 }
1421
1422 u_int
1423 m_fixhdr(struct mbuf *m0)
1424 {
1425 u_int len;
1426
1427 len = m_length(m0, NULL);
1428 m0->m_pkthdr.len = len;
1429 return (len);
1430 }
1431
1432 u_int
1433 m_length(struct mbuf *m0, struct mbuf **last)
1434 {
1435 struct mbuf *m;
1436 u_int len;
1437
1438 len = 0;
1439 for (m = m0; m != NULL; m = m->m_next) {
1440 len += m->m_len;
1441 if (m->m_next == NULL)
1442 break;
1443 }
1444 if (last != NULL)
1445 *last = m;
1446 return (len);
1447 }
1448
1449 /*
1450 * Defragment a mbuf chain, returning the shortest possible
1451 * chain of mbufs and clusters. If allocation fails and
1452 * this cannot be completed, NULL will be returned, but
1453 * the passed in chain will be unchanged. Upon success,
1454 * the original chain will be freed, and the new chain
1455 * will be returned.
1456 *
1457 * If a non-packet-header mbuf is passed in, the original
1458 * chain will be returned unharmed.
1459 */
1460 struct mbuf *
1461 m_defrag(struct mbuf *m0, int how)
1462 {
1463 struct mbuf *m_new = NULL, *m_final = NULL;
1464 int progress = 0, length;
1465
1466 MBUF_CHECKSLEEP(how);
1467 if (!(m0->m_flags & M_PKTHDR))
1468 return (m0);
1469
1470 m_fixhdr(m0); /* Needed sanity check */
1471
1472 #ifdef MBUF_STRESS_TEST
1473 if (m_defragrandomfailures) {
1474 int temp = arc4random() & 0xff;
1475 if (temp == 0xba)
1476 goto nospace;
1477 }
1478 #endif
1479
1480 if (m0->m_pkthdr.len > MHLEN)
1481 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1482 else
1483 m_final = m_gethdr(how, MT_DATA);
1484
1485 if (m_final == NULL)
1486 goto nospace;
1487
1488 if (m_dup_pkthdr(m_final, m0, how) == 0)
1489 goto nospace;
1490
1491 m_new = m_final;
1492
1493 while (progress < m0->m_pkthdr.len) {
1494 length = m0->m_pkthdr.len - progress;
1495 if (length > MCLBYTES)
1496 length = MCLBYTES;
1497
1498 if (m_new == NULL) {
1499 if (length > MLEN)
1500 m_new = m_getcl(how, MT_DATA, 0);
1501 else
1502 m_new = m_get(how, MT_DATA);
1503 if (m_new == NULL)
1504 goto nospace;
1505 }
1506
1507 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1508 progress += length;
1509 m_new->m_len = length;
1510 if (m_new != m_final)
1511 m_cat(m_final, m_new);
1512 m_new = NULL;
1513 }
1514 #ifdef MBUF_STRESS_TEST
1515 if (m0->m_next == NULL)
1516 m_defraguseless++;
1517 #endif
1518 m_freem(m0);
1519 m0 = m_final;
1520 #ifdef MBUF_STRESS_TEST
1521 m_defragpackets++;
1522 m_defragbytes += m0->m_pkthdr.len;
1523 #endif
1524 return (m0);
1525 nospace:
1526 #ifdef MBUF_STRESS_TEST
1527 m_defragfailure++;
1528 #endif
1529 if (m_final)
1530 m_freem(m_final);
1531 return (NULL);
1532 }
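The usual consumer is a driver transmit path that got EFBIG from busdma because the chain has too many segments. A sketch, with assumed 'tag', 'map' and 'segs' from the driver's softc:

	error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *md;

		md = m_defrag(m, M_NOWAIT);
		if (md == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		m = md;			/* m_defrag() freed the original */
		error = bus_dmamap_load_mbuf_sg(tag, map, m, segs,
		    &nsegs, BUS_DMA_NOWAIT);
	}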
1533
1534 /*
1535 * Return the number of fragments an mbuf will use. This is usually
1536 * used as a proxy for the number of scatter/gather elements needed by
1537 * a DMA engine to access an mbuf. In general mapped mbufs are
1538 * assumed to be backed by physically contiguous buffers that only
1539 * need a single fragment. Unmapped mbufs, on the other hand, can
1540 * span disjoint physical pages.
1541 */
1542 static int
1543 frags_per_mbuf(struct mbuf *m)
1544 {
1545 int frags;
1546
1547 if ((m->m_flags & M_EXTPG) == 0)
1548 return (1);
1549
1550 /*
1551 * The header and trailer are counted as a single fragment
1552 * each when present.
1553 *
1554 * XXX: This overestimates the number of fragments by assuming
1555 * all the backing physical pages are disjoint.
1556 */
1557 frags = 0;
1558 if (m->m_epg_hdrlen != 0)
1559 frags++;
1560 frags += m->m_epg_npgs;
1561 if (m->m_epg_trllen != 0)
1562 frags++;
1563
1564 return (frags);
1565 }
1566
1567 /*
1568 * Defragment an mbuf chain, returning at most maxfrags separate
1569 * mbufs+clusters. If this is not possible NULL is returned and
1570 * the original mbuf chain is left in its present (potentially
1571 * modified) state. We use two techniques: collapsing consecutive
1572 * mbufs and replacing consecutive mbufs by a cluster.
1573 *
1574 * NB: this should really be named m_defrag but that name is taken
1575 */
1576 struct mbuf *
1577 m_collapse(struct mbuf *m0, int how, int maxfrags)
1578 {
1579 struct mbuf *m, *n, *n2, **prev;
1580 u_int curfrags;
1581
1582 /*
1583 * Calculate the current number of frags.
1584 */
1585 curfrags = 0;
1586 for (m = m0; m != NULL; m = m->m_next)
1587 curfrags += frags_per_mbuf(m);
1588 /*
1589 * First, try to collapse mbufs. Note that we always collapse
1590 * towards the front so we don't need to deal with moving the
1591 * pkthdr. This may be suboptimal if the first mbuf has much
1592 * less data than the following.
1593 */
1594 m = m0;
1595 again:
1596 for (;;) {
1597 n = m->m_next;
1598 if (n == NULL)
1599 break;
1600 if (M_WRITABLE(m) &&
1601 n->m_len < M_TRAILINGSPACE(m)) {
1602 m_copydata(n, 0, n->m_len,
1603 mtod(m, char *) + m->m_len);
1604 m->m_len += n->m_len;
1605 m->m_next = n->m_next;
1606 curfrags -= frags_per_mbuf(n);
1607 m_free(n);
1608 if (curfrags <= maxfrags)
1609 return m0;
1610 } else
1611 m = n;
1612 }
1613 KASSERT(maxfrags > 1,
1614 ("maxfrags %u, but normal collapse failed", maxfrags));
1615 /*
1616 * Collapse consecutive mbufs to a cluster.
1617 */
1618 prev = &m0->m_next; /* NB: not the first mbuf */
1619 while ((n = *prev) != NULL) {
1620 if ((n2 = n->m_next) != NULL &&
1621 n->m_len + n2->m_len < MCLBYTES) {
1622 m = m_getcl(how, MT_DATA, 0);
1623 if (m == NULL)
1624 goto bad;
1625 m_copydata(n, 0, n->m_len, mtod(m, char *));
1626 m_copydata(n2, 0, n2->m_len,
1627 mtod(m, char *) + n->m_len);
1628 m->m_len = n->m_len + n2->m_len;
1629 m->m_next = n2->m_next;
1630 *prev = m;
1631 curfrags += 1; /* For the new cluster */
1632 curfrags -= frags_per_mbuf(n);
1633 curfrags -= frags_per_mbuf(n2);
1634 m_free(n);
1635 m_free(n2);
1636 if (curfrags <= maxfrags)
1637 return m0;
1638 /*
1639 * Still not there, try the normal collapse
1640 * again before we allocate another cluster.
1641 */
1642 goto again;
1643 }
1644 prev = &n->m_next;
1645 }
1646 /*
1647 * No place where we can collapse to a cluster; punt.
1648 * This can occur if, for example, you request 2 frags
1649 * but the packet requires that both be clusters (we
1650 * never reallocate the first mbuf to avoid moving the
1651 * packet header).
1652 */
1653 bad:
1654 return NULL;
1655 }
1656
1657 #ifdef MBUF_STRESS_TEST
1658
1659 /*
1660 * Fragment an mbuf chain. There's no reason you'd ever want to do
1661 * this in normal usage, but it's great for stress testing various
1662 * mbuf consumers.
1663 *
1664 * If fragmentation is not possible, the original chain will be
1665 * returned.
1666 *
1667 * Possible length values:
1668 * 0 no fragmentation will occur
1669 * > 0 each fragment will be of the specified length
1670 * -1 each fragment will be the same random value in length
1671 * -2 each fragment's length will be entirely random
1672 * (Random values range from 1 to 255)
1673 */
1674 struct mbuf *
1675 m_fragment(struct mbuf *m0, int how, int length)
1676 {
1677 struct mbuf *m_first, *m_last;
1678 int divisor = 255, progress = 0, fraglen;
1679
1680 if (!(m0->m_flags & M_PKTHDR))
1681 return (m0);
1682
1683 if (length == 0 || length < -2)
1684 return (m0);
1685 if (length > MCLBYTES)
1686 length = MCLBYTES;
1687 if (length < 0 && divisor > MCLBYTES)
1688 divisor = MCLBYTES;
1689 if (length == -1)
1690 length = 1 + (arc4random() % divisor);
1691 if (length > 0)
1692 fraglen = length;
1693
1694 m_fixhdr(m0); /* Needed sanity check */
1695
1696 m_first = m_getcl(how, MT_DATA, M_PKTHDR);
1697 if (m_first == NULL)
1698 goto nospace;
1699
1700 if (m_dup_pkthdr(m_first, m0, how) == 0)
1701 goto nospace;
1702
1703 m_last = m_first;
1704
1705 while (progress < m0->m_pkthdr.len) {
1706 if (length == -2)
1707 fraglen = 1 + (arc4random() % divisor);
1708 if (fraglen > m0->m_pkthdr.len - progress)
1709 fraglen = m0->m_pkthdr.len - progress;
1710
1711 if (progress != 0) {
1712 struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
1713 if (m_new == NULL)
1714 goto nospace;
1715
1716 m_last->m_next = m_new;
1717 m_last = m_new;
1718 }
1719
1720 m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
1721 progress += fraglen;
1722 m_last->m_len = fraglen;
1723 }
1724 m_freem(m0);
1725 m0 = m_first;
1726 return (m0);
1727 nospace:
1728 if (m_first)
1729 m_freem(m_first);
1730 /* Return the original chain on failure */
1731 return (m0);
1732 }
1733
1734 #endif
1735
1736 /*
1737 * Free pages from mbuf_ext_pgs, assuming they were allocated via
1738 * vm_page_alloc() and aren't associated with any object. This is the
1739 * complement to the allocator in m_uiotombuf_nomap().
1740 */
1741 void
1742 mb_free_mext_pgs(struct mbuf *m)
1743 {
1744 vm_page_t pg;
1745
1746 M_ASSERTEXTPG(m);
1747 for (int i = 0; i < m->m_epg_npgs; i++) {
1748 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1749 vm_page_unwire_noq(pg);
1750 vm_page_free(pg);
1751 }
1752 }
1753
1754 static struct mbuf *
1755 m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
1756 {
1757 struct mbuf *m, *mb, *prev;
1758 vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
1759 int error, length, i, needed;
1760 ssize_t total;
1761 int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;
1762
1763 MPASS((flags & M_PKTHDR) == 0);
1764 MPASS((how & M_ZERO) == 0);
1765
1766 /*
1767 * len can be zero or an arbitrary large value bound by
1768 * the total data supplied by the uio.
1769 */
1770 if (len > 0)
1771 total = MIN(uio->uio_resid, len);
1772 else
1773 total = uio->uio_resid;
1774
1775 if (maxseg == 0)
1776 maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;
1777
1778 /*
1779 * If total is zero, return an empty mbuf. This can occur
1780 * for TLS 1.0 connections which send empty fragments as
1781 * a countermeasure against the known-IV weakness in CBC
1782 * ciphersuites.
1783 */
1784 if (__predict_false(total == 0)) {
1785 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1786 if (mb == NULL)
1787 return (NULL);
1788 mb->m_epg_flags = EPG_FLAG_ANON;
1789 return (mb);
1790 }
1791
1792 /*
1793 * Allocate the pages
1794 */
1795 m = NULL;
1796 while (total > 0) {
1797 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1798 if (mb == NULL)
1799 goto failed;
1800 if (m == NULL)
1801 m = mb;
1802 else
1803 prev->m_next = mb;
1804 prev = mb;
1805 mb->m_epg_flags = EPG_FLAG_ANON;
1806 needed = length = MIN(maxseg, total);
1807 for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
1808 retry_page:
1809 pg_array[i] = vm_page_alloc_noobj(pflags);
1810 if (pg_array[i] == NULL) {
1811 if (how & M_NOWAIT) {
1812 goto failed;
1813 } else {
1814 vm_wait(NULL);
1815 goto retry_page;
1816 }
1817 }
1818 mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
1819 mb->m_epg_npgs++;
1820 }
1821 mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
1822 MBUF_EXT_PGS_ASSERT_SANITY(mb);
1823 total -= length;
1824 error = uiomove_fromphys(pg_array, 0, length, uio);
1825 if (error != 0)
1826 goto failed;
1827 mb->m_len = length;
1828 mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
1829 if (flags & M_PKTHDR)
1830 m->m_pkthdr.len += length;
1831 }
1832 return (m);
1833
1834 failed:
1835 m_freem(m);
1836 return (NULL);
1837 }
1838
1839 /*
1840 * Copy the contents of uio into a properly sized mbuf chain.
1841 */
1842 struct mbuf *
1843 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1844 {
1845 struct mbuf *m, *mb;
1846 int error, length;
1847 ssize_t total;
1848 int progress = 0;
1849
1850 if (flags & M_EXTPG)
1851 return (m_uiotombuf_nomap(uio, how, len, align, flags));
1852
1853 /*
1854 * len can be zero or an arbitrary large value bound by
1855 * the total data supplied by the uio.
1856 */
1857 if (len > 0)
1858 total = (uio->uio_resid < len) ? uio->uio_resid : len;
1859 else
1860 total = uio->uio_resid;
1861
1862 /*
1863 * The smallest unit returned by m_getm2() is a single mbuf
1864 * with pkthdr. We can't align past it.
1865 */
1866 if (align >= MHLEN)
1867 return (NULL);
1868
1869 /*
1870 * Give us the full allocation or nothing.
1871 * If len is zero return the smallest empty mbuf.
1872 */
1873 m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1874 if (m == NULL)
1875 return (NULL);
1876 m->m_data += align;
1877
1878 /* Fill all mbufs with uio data and update header information. */
1879 for (mb = m; mb != NULL; mb = mb->m_next) {
1880 length = min(M_TRAILINGSPACE(mb), total - progress);
1881
1882 error = uiomove(mtod(mb, void *), length, uio);
1883 if (error) {
1884 m_freem(m);
1885 return (NULL);
1886 }
1887
1888 mb->m_len = length;
1889 progress += length;
1890 if (flags & M_PKTHDR)
1891 m->m_pkthdr.len += length;
1892 }
1893 KASSERT(progress == total, ("%s: progress != total", __func__));
1894
1895 return (m);
1896 }
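A sketch of the socket-send use: copy at most 'space' bytes (an assumed limit) of user data into a fresh chain, reserving room for a link-layer header in the first mbuf:

	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, space, max_linkhdr, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);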
1897
1898 /*
1899 * Copy data to/from an unmapped mbuf into a uio limited by len if set.
1900 */
1901 int
1902 m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len)
1903 {
1904 vm_page_t pg;
1905 int error, i, off, pglen, pgoff, seglen, segoff;
1906
1907 M_ASSERTEXTPG(m);
1908 error = 0;
1909
1910 /* Skip over any data removed from the front. */
1911 off = mtod(m, vm_offset_t);
1912
1913 off += m_off;
1914 if (m->m_epg_hdrlen != 0) {
1915 if (off >= m->m_epg_hdrlen) {
1916 off -= m->m_epg_hdrlen;
1917 } else {
1918 seglen = m->m_epg_hdrlen - off;
1919 segoff = off;
1920 seglen = min(seglen, len);
1921 off = 0;
1922 len -= seglen;
1923 error = uiomove(__DECONST(void *,
1924 &m->m_epg_hdr[segoff]), seglen, uio);
1925 }
1926 }
1927 pgoff = m->m_epg_1st_off;
1928 for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
1929 pglen = m_epg_pagelen(m, i, pgoff);
1930 if (off >= pglen) {
1931 off -= pglen;
1932 pgoff = 0;
1933 continue;
1934 }
1935 seglen = pglen - off;
1936 segoff = pgoff + off;
1937 off = 0;
1938 seglen = min(seglen, len);
1939 len -= seglen;
1940 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1941 error = uiomove_fromphys(&pg, segoff, seglen, uio);
1942 pgoff = 0;
1943 }
1944 if (len != 0 && error == 0) {
1945 KASSERT((off + len) <= m->m_epg_trllen,
1946 ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
1947 m->m_epg_trllen, m_off));
1948 error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
1949 len, uio);
1950 }
1951 return (error);
1952 }
1953
1954 /*
1955 * Copy an mbuf chain into a uio limited by len if set.
1956 */
1957 int
1958 m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
1959 {
1960 int error, length, total;
1961 int progress = 0;
1962
1963 if (len > 0)
1964 total = min(uio->uio_resid, len);
1965 else
1966 total = uio->uio_resid;
1967
1968 /* Fill the uio with data from the mbufs. */
1969 for (; m != NULL; m = m->m_next) {
1970 length = min(m->m_len, total - progress);
1971
1972 if ((m->m_flags & M_EXTPG) != 0)
1973 error = m_unmapped_uiomove(m, 0, uio, length);
1974 else
1975 error = uiomove(mtod(m, void *), length, uio);
1976 if (error)
1977 return (error);
1978
1979 progress += length;
1980 }
1981
1982 return (0);
1983 }
1984
1985 /*
1986 * Create a writable copy of the mbuf chain. While doing this
1987 * we compact the chain with a goal of producing a chain with
1988 * at most two mbufs. The second mbuf in this chain is likely
1989 * to be a cluster. The primary purpose of this work is to create
1990 * a writable packet for encryption, compression, etc. The
1991 * secondary goal is to linearize the data so the data can be
1992 * passed to crypto hardware in the most efficient manner possible.
1993 */
1994 struct mbuf *
1995 m_unshare(struct mbuf *m0, int how)
1996 {
1997 struct mbuf *m, *mprev;
1998 struct mbuf *n, *mfirst, *mlast;
1999 int len, off;
2000
2001 mprev = NULL;
2002 for (m = m0; m != NULL; m = mprev->m_next) {
2003 /*
2004 * Regular mbufs are ignored unless there's a cluster
2005 * in front of it that we can use to coalesce. We do
2006 * the latter mainly so later clusters can be coalesced
2007 * also w/o having to handle them specially (i.e. convert
2008 * mbuf+cluster -> cluster). This optimization is heavily
2009 * influenced by the assumption that we're running over
2010 * Ethernet where MCLBYTES is large enough that the max
2011 * packet size will permit lots of coalescing into a
2012 * single cluster. This in turn permits efficient
2013 * crypto operations, especially when using hardware.
2014 */
2015 if ((m->m_flags & M_EXT) == 0) {
2016 if (mprev && (mprev->m_flags & M_EXT) &&
2017 m->m_len <= M_TRAILINGSPACE(mprev)) {
2018 /* XXX: this ignores mbuf types */
2019 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
2020 mtod(m, caddr_t), m->m_len);
2021 mprev->m_len += m->m_len;
2022 mprev->m_next = m->m_next; /* unlink from chain */
2023 m_free(m); /* reclaim mbuf */
2024 } else {
2025 mprev = m;
2026 }
2027 continue;
2028 }
2029 /*
2030 * Writable mbufs are left alone (for now).
2031 */
2032 if (M_WRITABLE(m)) {
2033 mprev = m;
2034 continue;
2035 }
2036
2037 /*
2038 * Not writable, replace with a copy or coalesce with
2039 * the previous mbuf if possible (since we have to copy
2040 * it anyway, we try to reduce the number of mbufs and
2041 * clusters so that future work is easier).
2042 */
2043 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
2044 /* NB: we only coalesce into a cluster or larger */
2045 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
2046 m->m_len <= M_TRAILINGSPACE(mprev)) {
2047 /* XXX: this ignores mbuf types */
2048 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
2049 mtod(m, caddr_t), m->m_len);
2050 mprev->m_len += m->m_len;
2051 mprev->m_next = m->m_next; /* unlink from chain */
2052 m_free(m); /* reclaim mbuf */
2053 continue;
2054 }
2055
2056 /*
2057 * Allocate new space to hold the copy and copy the data.
2058 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
2059 * splitting them into clusters. We could just malloc a
2060 * buffer and make it external but too many device drivers
2061 * don't know how to break up the non-contiguous memory when
2062 * doing DMA.
2063 */
2064 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
2065 if (n == NULL) {
2066 m_freem(m0);
2067 return (NULL);
2068 }
2069 if (m->m_flags & M_PKTHDR) {
2070 KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
2071 __func__, m0, m));
2072 m_move_pkthdr(n, m);
2073 }
2074 len = m->m_len;
2075 off = 0;
2076 mfirst = n;
2077 mlast = NULL;
2078 for (;;) {
2079 int cc = min(len, MCLBYTES);
2080 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
2081 n->m_len = cc;
2082 if (mlast != NULL)
2083 mlast->m_next = n;
2084 mlast = n;
2085 #if 0
2086 newipsecstat.ips_clcopied++;
2087 #endif
2088
2089 len -= cc;
2090 if (len <= 0)
2091 break;
2092 off += cc;
2093
2094 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
2095 if (n == NULL) {
2096 m_freem(mfirst);
2097 m_freem(m0);
2098 return (NULL);
2099 }
2100 }
2101 n->m_next = m->m_next;
2102 if (mprev == NULL)
2103 m0 = mfirst; /* new head of chain */
2104 else
2105 mprev->m_next = mfirst; /* replace old mbuf */
2106 m_free(m); /* release old mbuf */
2107 mprev = mfirst;
2108 }
2109 return (m0);
2110 }
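A sketch of the IPsec-style call site this was written for: guarantee writability before encrypting in place. On failure the original chain has already been freed:

	m = m_unshare(m, M_NOWAIT);
	if (m == NULL)
		return (ENOBUFS);
	/* every mbuf in 'm' now satisfies M_WRITABLE() */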
2111
2112 #ifdef MBUF_PROFILING
2113
2114 #define MP_BUCKETS 32 /* don't just change this as things may overflow. */
2115 struct mbufprofile {
2116 uintmax_t wasted[MP_BUCKETS];
2117 uintmax_t used[MP_BUCKETS];
2118 uintmax_t segments[MP_BUCKETS];
2119 } mbprof;
2120
2121 #define MP_MAXDIGITS 21 /* enough for UINTMAX_MAX (20 digits) plus NUL */
2122 #define MP_NUMLINES 6
2123 #define MP_NUMSPERLINE 16
2124 #define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */
2125 /* work out max space needed and add a bit of spare space too */
2126 #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
2127 #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)
2128
2129 char mbprofbuf[MP_BUFSIZE];
2130
2131 void
2132 m_profile(struct mbuf *m)
2133 {
2134 int segments = 0;
2135 int used = 0;
2136 int wasted = 0;
2137
2138 while (m) {
2139 segments++;
2140 used += m->m_len;
2141 if (m->m_flags & M_EXT) {
2142 wasted += MHLEN - sizeof(m->m_ext) +
2143 m->m_ext.ext_size - m->m_len;
2144 } else {
2145 if (m->m_flags & M_PKTHDR)
2146 wasted += MHLEN - m->m_len;
2147 else
2148 wasted += MLEN - m->m_len;
2149 }
2150 m = m->m_next;
2151 }
2152 /* be paranoid.. it helps */
2153 if (segments > MP_BUCKETS - 1)
2154 segments = MP_BUCKETS - 1;
2155 if (used > 100000)
2156 used = 100000;
2157 if (wasted > 100000)
2158 wasted = 100000;
2159 /* store in the appropriate bucket */
2160 /* don't bother locking. if it's slightly off, so what? */
2161 mbprof.segments[segments]++;
2162 mbprof.used[fls(used)]++;
2163 mbprof.wasted[fls(wasted)]++;
2164 }
2165
2166 static void
2167 mbprof_textify(void)
2168 {
2169 int offset;
2170 char *c;
2171 uint64_t *p;
2172
2173 p = &mbprof.wasted[0];
2174 c = mbprofbuf;
2175 offset = snprintf(c, MP_MAXLINE + 10,
2176 "wasted:\n"
2177 "%ju %ju %ju %ju %ju %ju %ju %ju "
2178 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2179 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2180 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2181 #ifdef BIG_ARRAY
2182 p = &mbprof.wasted[16];
2183 c += offset;
2184 offset = snprintf(c, MP_MAXLINE,
2185 "%ju %ju %ju %ju %ju %ju %ju %ju "
2186 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2187 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2188 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2189 #endif
2190 p = &mbprof.used[0];
2191 c += offset;
2192 offset = snprintf(c, MP_MAXLINE + 10,
2193 "used:\n"
2194 "%ju %ju %ju %ju %ju %ju %ju %ju "
2195 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2196 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2197 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2198 #ifdef BIG_ARRAY
2199 p = &mbprof.used[16];
2200 c += offset;
2201 offset = snprintf(c, MP_MAXLINE,
2202 "%ju %ju %ju %ju %ju %ju %ju %ju "
2203 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2204 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2205 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2206 #endif
2207 p = &mbprof.segments[0];
2208 c += offset;
2209 offset = snprintf(c, MP_MAXLINE + 10,
2210 "segments:\n"
2211 "%ju %ju %ju %ju %ju %ju %ju %ju "
2212 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2213 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2214 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2215 #ifdef BIG_ARRAY
2216 p = &mbprof.segments[16];
2217 c += offset;
2218 offset = snprintf(c, MP_MAXLINE,
2219 "%ju %ju %ju %ju %ju %ju %ju %ju "
2220 "%ju %ju %ju %ju %ju %ju %ju %jju",
2221 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2222 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2223 #endif
2224 }
2225
2226 static int
2227 mbprof_handler(SYSCTL_HANDLER_ARGS)
2228 {
2229 int error;
2230
2231 mbprof_textify();
2232 error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
2233 return (error);
2234 }
2235
2236 static int
2237 mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
2238 {
2239 int clear, error;
2240
2241 clear = 0;
2242 error = sysctl_handle_int(oidp, &clear, 0, req);
2243 if (error || !req->newptr)
2244 return (error);
2245
2246 if (clear) {
2247 bzero(&mbprof, sizeof(mbprof));
2248 }
2249
2250 return (error);
2251 }
2252
2253 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
2254 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
2255 mbprof_handler, "A",
2256 "mbuf profiling statistics");
2257
2258 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
2259 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
2260 mbprof_clr_handler, "I",
2261 "clear mbuf profiling statistics");
2262 #endif