FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_param.h"
38 #include "opt_mbuf_stress_test.h"
39 #include "opt_mbuf_profiling.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sysctl.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/uio.h>
52 #include <sys/vmmeter.h>
53 #include <sys/sbuf.h>
54 #include <sys/sdt.h>
55 #include <vm/vm.h>
56 #include <vm/vm_pageout.h>
57 #include <vm/vm_page.h>
58
59 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
60 "struct mbuf *", "mbufinfo_t *",
61 "uint32_t", "uint32_t",
62 "uint16_t", "uint16_t",
63 "uint32_t", "uint32_t",
64 "uint32_t", "uint32_t");
65
66 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
67 "uint32_t", "uint32_t",
68 "uint16_t", "uint16_t",
69 "struct mbuf *", "mbufinfo_t *");
70
71 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
72 "uint32_t", "uint32_t",
73 "uint16_t", "uint16_t",
74 "struct mbuf *", "mbufinfo_t *");
75
76 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
77 "uint32_t", "uint32_t",
78 "uint16_t", "uint16_t",
79 "uint32_t", "uint32_t",
80 "struct mbuf *", "mbufinfo_t *");
81
82 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
83 "uint32_t", "uint32_t",
84 "uint16_t", "uint16_t",
85 "uint32_t", "uint32_t",
86 "uint32_t", "uint32_t",
87 "struct mbuf *", "mbufinfo_t *");
88
89 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
90 "struct mbuf *", "mbufinfo_t *",
91 "uint32_t", "uint32_t",
92 "uint32_t", "uint32_t");
93
94 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
95 "struct mbuf *", "mbufinfo_t *",
96 "uint32_t", "uint32_t",
97 "uint32_t", "uint32_t",
98 "void*", "void*");
99
100 SDT_PROBE_DEFINE(sdt, , , m__cljset);
101
102 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
103 "struct mbuf *", "mbufinfo_t *");
104
105 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
106 "struct mbuf *", "mbufinfo_t *");
107
108 #include <security/mac/mac_framework.h>
109
110 int max_linkhdr;
111 int max_protohdr;
112 int max_hdr;
113 int max_datalen;
114 #ifdef MBUF_STRESS_TEST
115 int m_defragpackets;
116 int m_defragbytes;
117 int m_defraguseless;
118 int m_defragfailure;
119 int m_defragrandomfailures;
120 #endif
121
122 /*
123 * sysctl(8) exported objects
124 */
125 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
126 &max_linkhdr, 0, "Size of largest link layer header");
127 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
128 &max_protohdr, 0, "Size of largest protocol layer header");
129 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
130 &max_hdr, 0, "Size of largest link plus protocol header");
131 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
132 &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
133 #ifdef MBUF_STRESS_TEST
134 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
135 &m_defragpackets, 0, "");
136 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
137 &m_defragbytes, 0, "");
138 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
139 &m_defraguseless, 0, "");
140 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
141 &m_defragfailure, 0, "");
142 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
143 &m_defragrandomfailures, 0, "");
144 #endif
145
146 /*
147 * Ensure the correct size of various mbuf parameters. These could be off due
148 * to compiler-induced padding and alignment artifacts.
149 */
150 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
151 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
152
153 /*
154 * mbuf data storage should be 64-bit aligned regardless of architectural
155 * pointer size; check this is the case with and without a packet header.
156 */
157 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
158 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
159
160 /*
161 * While the specific values here don't matter too much (i.e., +/- a few
162 * words), we do want to ensure that changes to these values are carefully
163 * reasoned about and properly documented. This is especially the case as
164 * network-protocol and device-driver modules encode these layouts, and must
165 * be recompiled if the structures change. Check these values at compile time
166 * against the ones documented in comments in mbuf.h.
167 *
168 * NB: Possibly they should be documented there via #define's and not just
169 * comments.
170 */
171 #if defined(__LP64__)
172 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
173 CTASSERT(sizeof(struct pkthdr) == 56);
174 CTASSERT(sizeof(struct m_ext) == 160);
175 #else
176 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
177 CTASSERT(sizeof(struct pkthdr) == 48);
178 #if defined(__powerpc__) && defined(BOOKE)
179 /* PowerPC booke has 64-bit physical pointers. */
180 CTASSERT(sizeof(struct m_ext) == 184);
181 #else
182 CTASSERT(sizeof(struct m_ext) == 180);
183 #endif
184 #endif
185
186 /*
187 * Assert that the queue(3) macros produce code of the same size as an old
188 * plain pointer does.
189 */
190 #ifdef INVARIANTS
191 static struct mbuf __used m_assertbuf;
192 CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
193 CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
194 CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
195 CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
196 #endif
197
198 /*
199 * Attach the cluster from *m to *n, set up m_ext in *n
200 * and bump the refcount of the cluster.
201 */
202 void
203 mb_dupcl(struct mbuf *n, struct mbuf *m)
204 {
205 volatile u_int *refcnt;
206
207 KASSERT(m->m_flags & (M_EXT|M_EXTPG),
208 ("%s: M_EXT|M_EXTPG not set on %p", __func__, m));
209 KASSERT(!(n->m_flags & (M_EXT|M_EXTPG)),
210 ("%s: M_EXT|M_EXTPG set on %p", __func__, n));
211
212 /*
213 * Cache access optimization.
214 *
215 * o Regular M_EXT storage doesn't need full copy of m_ext, since
216 * the holder of the 'ext_count' is responsible to carry the free
217 * routine and its arguments.
218 * o M_EXTPG data is split between main part of mbuf and m_ext, the
219 * main part is copied in full, the m_ext part is similar to M_EXT.
220 * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is
221 * special - it needs full copy of m_ext into each mbuf, since any
222 * copy could end up as the last to free.
223 */
224 if (m->m_flags & M_EXTPG) {
225 bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
226 __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
227 bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
228 } else if (m->m_ext.ext_type == EXT_EXTREF)
229 bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
230 else
231 bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
232
233 n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);
234
235 /* See if this is the mbuf that holds the embedded refcount. */
236 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
237 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
238 n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
239 } else {
240 KASSERT(m->m_ext.ext_cnt != NULL,
241 ("%s: no refcounting pointer on %p", __func__, m));
242 refcnt = m->m_ext.ext_cnt;
243 }
244
245 if (*refcnt == 1)
246 *refcnt += 1;
247 else
248 atomic_add_int(refcnt, 1);
249 }
250
251 void
252 m_demote_pkthdr(struct mbuf *m)
253 {
254
255 M_ASSERTPKTHDR(m);
256 M_ASSERT_NO_SND_TAG(m);
257
258 m_tag_delete_chain(m, NULL);
259 m->m_flags &= ~M_PKTHDR;
260 bzero(&m->m_pkthdr, sizeof(struct pkthdr));
261 }
262
263 /*
264 * Strip any tags and packet headers from the mbuf (chain).
265 * If "all" is set then the first mbuf in the chain will be
266 * cleaned too.
267 */
268 void
269 m_demote(struct mbuf *m0, int all, int flags)
270 {
271 struct mbuf *m;
272
273 flags |= M_DEMOTEFLAGS;
274
275 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
276 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
277 __func__, m, m0));
278 if (m->m_flags & M_PKTHDR)
279 m_demote_pkthdr(m);
280 m->m_flags &= flags;
281 }
282 }
283
284 /*
285 * Sanity checks on mbuf (chain) for use in KASSERT() and general
286 * debugging. Returns 1 when all tests pass; on a failed check it
287 * panics under INVARIANTS, or prints a diagnostic and continues.
288 * "sanitize" selects the failure action: 0 runs M_SANITY_ACTION,
289 * 1 scrubs or garbles the offending fields so they blow up later.
290 */
291 int
292 m_sanity(struct mbuf *m0, int sanitize)
293 {
294 struct mbuf *m;
295 caddr_t a, b;
296 int pktlen = 0;
297
298 #ifdef INVARIANTS
299 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
300 #else
301 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
302 #endif
303
304 for (m = m0; m != NULL; m = m->m_next) {
305 /*
306 * Basic pointer checks. If any of these fails then some
307 * unrelated kernel memory before or after us is trashed.
308 * No way to recover from that.
309 */
310 a = M_START(m);
311 b = a + M_SIZE(m);
312 if ((caddr_t)m->m_data < a)
313 M_SANITY_ACTION("m_data outside mbuf data range left");
314 if ((caddr_t)m->m_data > b)
315 M_SANITY_ACTION("m_data outside mbuf data range right");
316 if ((caddr_t)m->m_data + m->m_len > b)
317 M_SANITY_ACTION("m_data + m_len exeeds mbuf space");
318
319 /* m->m_nextpkt may only be set on first mbuf in chain. */
320 if (m != m0 && m->m_nextpkt != NULL) {
321 if (sanitize) {
322 m_freem(m->m_nextpkt);
323 m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
324 } else
325 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
326 }
327
328 /* packet length (not mbuf length!) calculation */
329 if (m0->m_flags & M_PKTHDR)
330 pktlen += m->m_len;
331
332 /* m_tags may only be attached to first mbuf in chain. */
333 if (m != m0 && m->m_flags & M_PKTHDR &&
334 !SLIST_EMPTY(&m->m_pkthdr.tags)) {
335 if (sanitize) {
336 m_tag_delete_chain(m, NULL);
337 /* put in 0xDEADC0DE perhaps? */
338 } else
339 M_SANITY_ACTION("m_tags on in-chain mbuf");
340 }
341
342 /* M_PKTHDR may only be set on first mbuf in chain */
343 if (m != m0 && m->m_flags & M_PKTHDR) {
344 if (sanitize) {
345 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
346 m->m_flags &= ~M_PKTHDR;
347 /* put in 0xDEADC0DE and leave hdr flag in */
348 } else
349 M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
350 }
351 }
352 m = m0;
353 if (pktlen && pktlen != m->m_pkthdr.len) {
354 if (sanitize)
355 m->m_pkthdr.len = 0;
356 else
357 M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
358 }
359 return 1;
360
361 #undef M_SANITY_ACTION
362 }
363
364 /*
365 * Non-inlined part of m_init().
366 */
367 int
368 m_pkthdr_init(struct mbuf *m, int how)
369 {
370 #ifdef MAC
371 int error;
372 #endif
373 m->m_data = m->m_pktdat;
374 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
375 #ifdef NUMA
376 m->m_pkthdr.numa_domain = M_NODOM;
377 #endif
378 #ifdef MAC
379 /* If the label init fails, fail the alloc */
380 error = mac_mbuf_init(m, how);
381 if (error)
382 return (error);
383 #endif
384
385 return (0);
386 }
387
388 /*
389 * "Move" mbuf pkthdr from "from" to "to".
390 * "from" must have M_PKTHDR set, and "to" must be empty.
391 */
392 void
393 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
394 {
395
396 #if 0
397 /* see below for why these are not enabled */
398 M_ASSERTPKTHDR(to);
399 /* Note: with MAC, this may not be a good assertion. */
400 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
401 ("m_move_pkthdr: to has tags"));
402 #endif
403 #ifdef MAC
404 /*
405 * XXXMAC: It could be this should also occur for non-MAC?
406 */
407 if (to->m_flags & M_PKTHDR)
408 m_tag_delete_chain(to, NULL);
409 #endif
410 to->m_flags = (from->m_flags & M_COPYFLAGS) |
411 (to->m_flags & (M_EXT | M_EXTPG));
412 if ((to->m_flags & M_EXT) == 0)
413 to->m_data = to->m_pktdat;
414 to->m_pkthdr = from->m_pkthdr; /* especially tags */
415 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
416 from->m_flags &= ~M_PKTHDR;
417 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
418 from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
419 from->m_pkthdr.snd_tag = NULL;
420 }
421 }
422
423 /*
424 * Duplicate "from"'s mbuf pkthdr in "to".
425 * "from" must have M_PKTHDR set, and "to" must be empty.
426 * In particular, this does a deep copy of the packet tags.
427 */
428 int
429 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
430 {
431
432 #if 0
433 /*
434 * The mbuf allocator only initializes the pkthdr
435 * when the mbuf is allocated with m_gethdr(). Many users
436 * (e.g. m_copy*, m_prepend) use m_get() and then
437 * smash the pkthdr as needed causing these
438 * assertions to trip. For now just disable them.
439 */
440 M_ASSERTPKTHDR(to);
441 /* Note: with MAC, this may not be a good assertion. */
442 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
443 #endif
444 MBUF_CHECKSLEEP(how);
445 #ifdef MAC
446 if (to->m_flags & M_PKTHDR)
447 m_tag_delete_chain(to, NULL);
448 #endif
449 to->m_flags = (from->m_flags & M_COPYFLAGS) |
450 (to->m_flags & (M_EXT | M_EXTPG));
451 if ((to->m_flags & M_EXT) == 0)
452 to->m_data = to->m_pktdat;
453 to->m_pkthdr = from->m_pkthdr;
454 if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
455 m_snd_tag_ref(from->m_pkthdr.snd_tag);
456 SLIST_INIT(&to->m_pkthdr.tags);
457 return (m_tag_copy_chain(to, from, how));
458 }
459
460 /*
461 * Lesser-used path for M_PREPEND:
462 * allocate new mbuf to prepend to chain,
463 * copy junk along.
464 */
465 struct mbuf *
466 m_prepend(struct mbuf *m, int len, int how)
467 {
468 struct mbuf *mn;
469
470 if (m->m_flags & M_PKTHDR)
471 mn = m_gethdr(how, m->m_type);
472 else
473 mn = m_get(how, m->m_type);
474 if (mn == NULL) {
475 m_freem(m);
476 return (NULL);
477 }
478 if (m->m_flags & M_PKTHDR)
479 m_move_pkthdr(mn, m);
480 mn->m_next = m;
481 m = mn;
482 if (len < M_SIZE(m))
483 M_ALIGN(m, len);
484 m->m_len = len;
485 return (m);
486 }
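
/*
 * Editor's note: a minimal usage sketch of m_prepend(), not part of the
 * original source; the 14-byte header size is an illustrative assumption.
 */
#if 0
static struct mbuf *
example_prepend_hdr(struct mbuf *m)
{
	/* Grow the front of the chain; the chain is freed on failure. */
	m = m_prepend(m, 14, M_NOWAIT);
	if (m == NULL)
		return (NULL);		/* m_prepend() already freed it */
	/* mtod(m, char *) now points at 14 writable header bytes. */
	return (m);
}
#endif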
487
488 /*
489 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
490 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
491 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
492 * Note that the copy is read-only, because clusters are not copied,
493 * only their reference counts are incremented.
494 */
495 struct mbuf *
496 m_copym(struct mbuf *m, int off0, int len, int wait)
497 {
498 struct mbuf *n, **np;
499 int off = off0;
500 struct mbuf *top;
501 int copyhdr = 0;
502
503 KASSERT(off >= 0, ("m_copym, negative off %d", off));
504 KASSERT(len >= 0, ("m_copym, negative len %d", len));
505 MBUF_CHECKSLEEP(wait);
506 if (off == 0 && m->m_flags & M_PKTHDR)
507 copyhdr = 1;
508 while (off > 0) {
509 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
510 if (off < m->m_len)
511 break;
512 off -= m->m_len;
513 m = m->m_next;
514 }
515 np = &top;
516 top = NULL;
517 while (len > 0) {
518 if (m == NULL) {
519 KASSERT(len == M_COPYALL,
520 ("m_copym, length > size of mbuf chain"));
521 break;
522 }
523 if (copyhdr)
524 n = m_gethdr(wait, m->m_type);
525 else
526 n = m_get(wait, m->m_type);
527 *np = n;
528 if (n == NULL)
529 goto nospace;
530 if (copyhdr) {
531 if (!m_dup_pkthdr(n, m, wait))
532 goto nospace;
533 if (len == M_COPYALL)
534 n->m_pkthdr.len -= off0;
535 else
536 n->m_pkthdr.len = len;
537 copyhdr = 0;
538 }
539 n->m_len = min(len, m->m_len - off);
540 if (m->m_flags & (M_EXT|M_EXTPG)) {
541 n->m_data = m->m_data + off;
542 mb_dupcl(n, m);
543 } else
544 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
545 (u_int)n->m_len);
546 if (len != M_COPYALL)
547 len -= n->m_len;
548 off = 0;
549 m = m->m_next;
550 np = &n->m_next;
551 }
552
553 return (top);
554 nospace:
555 m_freem(top);
556 return (NULL);
557 }
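
/*
 * Editor's note: a hedged sketch (not original source) of the common
 * m_copym() retransmit idiom.  The copy shares clusters and is
 * read-only; callers must check M_WRITABLE() before modifying it.
 */
#if 0
static struct mbuf *
example_copy_range(struct mbuf *m, int off, int len)
{
	/* Reference-counted copy of [off, off + len); NULL on ENOBUFS. */
	return (m_copym(m, off, len, M_NOWAIT));
}
#endif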
558
559 /*
560 * Copy an entire packet, including header (which must be present).
561 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
562 * Note that the copy is read-only, because clusters are not copied,
563 * only their reference counts are incremented.
564 * Preserve alignment of the first mbuf so if the creator has left
565 * some room at the beginning (e.g. for inserting protocol headers)
566 * the copies still have the room available.
567 */
568 struct mbuf *
569 m_copypacket(struct mbuf *m, int how)
570 {
571 struct mbuf *top, *n, *o;
572
573 MBUF_CHECKSLEEP(how);
574 n = m_get(how, m->m_type);
575 top = n;
576 if (n == NULL)
577 goto nospace;
578
579 if (!m_dup_pkthdr(n, m, how))
580 goto nospace;
581 n->m_len = m->m_len;
582 if (m->m_flags & (M_EXT|M_EXTPG)) {
583 n->m_data = m->m_data;
584 mb_dupcl(n, m);
585 } else {
586 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
587 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
588 }
589
590 m = m->m_next;
591 while (m) {
592 o = m_get(how, m->m_type);
593 if (o == NULL)
594 goto nospace;
595
596 n->m_next = o;
597 n = n->m_next;
598
599 n->m_len = m->m_len;
600 if (m->m_flags & (M_EXT|M_EXTPG)) {
601 n->m_data = m->m_data;
602 mb_dupcl(n, m);
603 } else {
604 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
605 }
606
607 m = m->m_next;
608 }
609 return top;
610 nospace:
611 m_freem(top);
612 return (NULL);
613 }
614
615 static void
616 m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
617 {
618 struct iovec iov;
619 struct uio uio;
620 int error;
621
622 KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
623 KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
624 KASSERT(off < m->m_len,
625 ("m_copyfromunmapped: len exceeds mbuf length"));
626 iov.iov_base = cp;
627 iov.iov_len = len;
628 uio.uio_resid = len;
629 uio.uio_iov = &iov;
630 uio.uio_segflg = UIO_SYSSPACE;
631 uio.uio_iovcnt = 1;
632 uio.uio_offset = 0;
633 uio.uio_rw = UIO_READ;
634 error = m_unmapped_uiomove(m, off, &uio, len);
635 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
636 len));
637 }
638
639 /*
640 * Copy data from an mbuf chain starting "off" bytes from the beginning,
641 * continuing for "len" bytes, into the indicated buffer.
642 */
643 void
644 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
645 {
646 u_int count;
647
648 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
649 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
650 while (off > 0) {
651 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
652 if (off < m->m_len)
653 break;
654 off -= m->m_len;
655 m = m->m_next;
656 }
657 while (len > 0) {
658 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
659 count = min(m->m_len - off, len);
660 if ((m->m_flags & M_EXTPG) != 0)
661 m_copyfromunmapped(m, off, count, cp);
662 else
663 bcopy(mtod(m, caddr_t) + off, cp, count);
664 len -= count;
665 cp += count;
666 off = 0;
667 m = m->m_next;
668 }
669 }
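
/*
 * Editor's note: an illustrative sketch (not original source) of using
 * m_copydata() to linearize a header that may span several mbufs.
 */
#if 0
static void
example_peek_header(const struct mbuf *m, void *buf, int hlen)
{
	/* Caller is assumed to have verified the chain holds hlen bytes. */
	m_copydata(m, 0, hlen, (caddr_t)buf);
}
#endif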
670
671 /*
672 * Copy a packet header mbuf chain into a completely new chain, including
673 * copying any mbuf clusters. Use this instead of m_copypacket() when
674 * you need a writable copy of an mbuf chain.
675 */
676 struct mbuf *
677 m_dup(const struct mbuf *m, int how)
678 {
679 struct mbuf **p, *top = NULL;
680 int remain, moff, nsize;
681
682 MBUF_CHECKSLEEP(how);
683 /* Sanity check */
684 if (m == NULL)
685 return (NULL);
686 M_ASSERTPKTHDR(m);
687
688 /* While there's more data, get a new mbuf, tack it on, and fill it */
689 remain = m->m_pkthdr.len;
690 moff = 0;
691 p = &top;
692 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
693 struct mbuf *n;
694
695 /* Get the next new mbuf */
696 if (remain >= MINCLSIZE) {
697 n = m_getcl(how, m->m_type, 0);
698 nsize = MCLBYTES;
699 } else {
700 n = m_get(how, m->m_type);
701 nsize = MLEN;
702 }
703 if (n == NULL)
704 goto nospace;
705
706 if (top == NULL) { /* First one, must be PKTHDR */
707 if (!m_dup_pkthdr(n, m, how)) {
708 m_free(n);
709 goto nospace;
710 }
711 if ((n->m_flags & M_EXT) == 0)
712 nsize = MHLEN;
713 n->m_flags &= ~M_RDONLY;
714 }
715 n->m_len = 0;
716
717 /* Link it into the new chain */
718 *p = n;
719 p = &n->m_next;
720
721 /* Copy data from original mbuf(s) into new mbuf */
722 while (n->m_len < nsize && m != NULL) {
723 int chunk = min(nsize - n->m_len, m->m_len - moff);
724
725 m_copydata(m, moff, chunk, n->m_data + n->m_len);
726 moff += chunk;
727 n->m_len += chunk;
728 remain -= chunk;
729 if (moff == m->m_len) {
730 m = m->m_next;
731 moff = 0;
732 }
733 }
734
735 /* Check correct total mbuf length */
736 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
737 ("%s: bogus m_pkthdr.len", __func__));
738 }
739 return (top);
740
741 nospace:
742 m_freem(top);
743 return (NULL);
744 }
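
/*
 * Editor's note: a sketch, not original source, of obtaining a writable
 * packet via m_dup().  M_WRITABLE() only inspects the mbuf it is given;
 * a production caller may need to check every mbuf in the chain.
 */
#if 0
static struct mbuf *
example_make_writable(struct mbuf *m)
{
	struct mbuf *n;

	if (M_WRITABLE(m))
		return (m);
	n = m_dup(m, M_NOWAIT);	/* deep copy, clusters included */
	m_freem(m);		/* original no longer needed */
	return (n);		/* NULL if allocation failed */
}
#endif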
745
746 /*
747 * Concatenate mbuf chain n to m.
748 * Both chains must be of the same type (e.g. MT_DATA).
749 * Any m_pkthdr is not updated.
750 */
751 void
752 m_cat(struct mbuf *m, struct mbuf *n)
753 {
754 while (m->m_next)
755 m = m->m_next;
756 while (n) {
757 if (!M_WRITABLE(m) ||
758 (n->m_flags & M_EXTPG) != 0 ||
759 M_TRAILINGSPACE(m) < n->m_len) {
760 /* just join the two chains */
761 m->m_next = n;
762 return;
763 }
764 /* splat the data from one into the other */
765 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
766 (u_int)n->m_len);
767 m->m_len += n->m_len;
768 n = m_free(n);
769 }
770 }
771
772 /*
773 * Concatenate two pkthdr mbuf chains.
774 */
775 void
776 m_catpkt(struct mbuf *m, struct mbuf *n)
777 {
778
779 M_ASSERTPKTHDR(m);
780 M_ASSERTPKTHDR(n);
781
782 m->m_pkthdr.len += n->m_pkthdr.len;
783 m_demote(n, 1, 0);
784
785 m_cat(m, n);
786 }
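
/*
 * Editor's note: illustrative sketch (not original source) contrasting
 * m_catpkt() with raw m_cat(): the former updates the head's pkthdr.len
 * and demotes the tail before joining.
 */
#if 0
static void
example_reassemble(struct mbuf *head, struct mbuf *frag)
{
	/* After this call "frag" is owned by the head chain. */
	m_catpkt(head, frag);
}
#endif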
787
788 void
789 m_adj(struct mbuf *mp, int req_len)
790 {
791 int len = req_len;
792 struct mbuf *m;
793 int count;
794
795 if ((m = mp) == NULL)
796 return;
797 if (len >= 0) {
798 /*
799 * Trim from head.
800 */
801 while (m != NULL && len > 0) {
802 if (m->m_len <= len) {
803 len -= m->m_len;
804 m->m_len = 0;
805 m = m->m_next;
806 } else {
807 m->m_len -= len;
808 m->m_data += len;
809 len = 0;
810 }
811 }
812 if (mp->m_flags & M_PKTHDR)
813 mp->m_pkthdr.len -= (req_len - len);
814 } else {
815 /*
816 * Trim from tail. Scan the mbuf chain,
817 * calculating its length and finding the last mbuf.
818 * If the adjustment only affects this mbuf, then just
819 * adjust and return. Otherwise, rescan and truncate
820 * after the remaining size.
821 */
822 len = -len;
823 count = 0;
824 for (;;) {
825 count += m->m_len;
826 if (m->m_next == (struct mbuf *)0)
827 break;
828 m = m->m_next;
829 }
830 if (m->m_len >= len) {
831 m->m_len -= len;
832 if (mp->m_flags & M_PKTHDR)
833 mp->m_pkthdr.len -= len;
834 return;
835 }
836 count -= len;
837 if (count < 0)
838 count = 0;
839 /*
840 * Correct length for chain is "count".
841 * Find the mbuf with last data, adjust its length,
842 * and toss data from remaining mbufs on chain.
843 */
844 m = mp;
845 if (m->m_flags & M_PKTHDR)
846 m->m_pkthdr.len = count;
847 for (; m; m = m->m_next) {
848 if (m->m_len >= count) {
849 m->m_len = count;
850 if (m->m_next != NULL) {
851 m_freem(m->m_next);
852 m->m_next = NULL;
853 }
854 break;
855 }
856 count -= m->m_len;
857 }
858 }
859 }
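
/*
 * Editor's note: a usage sketch of m_adj(), not part of the original
 * source; hdrlen and the 4-byte trailer are illustrative values.
 */
#if 0
static void
example_strip(struct mbuf *m, int hdrlen)
{
	m_adj(m, hdrlen);	/* positive length trims from the head */
	m_adj(m, -4);		/* negative length trims from the tail */
	/* Note the head trim zeroes m_len but never frees leading mbufs. */
}
#endif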
860
861 void
862 m_adj_decap(struct mbuf *mp, int len)
863 {
864 uint8_t rsstype;
865
866 m_adj(mp, len);
867 if ((mp->m_flags & M_PKTHDR) != 0) {
868 /*
869 * If flowid was calculated by card from the inner
870 * headers, move flowid to the decapsulated mbuf
871 * chain, otherwise clear. This depends on the
872 * internals of m_adj, which keeps pkthdr as is, in
873 * particular not changing rsstype and flowid.
874 */
875 rsstype = mp->m_pkthdr.rsstype;
876 if ((rsstype & M_HASHTYPE_INNER) != 0) {
877 M_HASHTYPE_SET(mp, rsstype & ~M_HASHTYPE_INNER);
878 } else {
879 M_HASHTYPE_CLEAR(mp);
880 }
881 }
882 }
883
884 /*
885 * Rearrange an mbuf chain so that len bytes are contiguous
886 * and in the data area of an mbuf (so that mtod will work
887 * for a structure of size len). Returns the resulting
888 * mbuf chain on success, frees it and returns NULL on failure.
889 * If there is room, it will add up to max_protohdr-len extra bytes to the
890 * contiguous region in an attempt to avoid being called next time.
891 */
892 struct mbuf *
893 m_pullup(struct mbuf *n, int len)
894 {
895 struct mbuf *m;
896 int count;
897 int space;
898
899 KASSERT((n->m_flags & M_EXTPG) == 0,
900 ("%s: unmapped mbuf %p", __func__, n));
901
902 /*
903 * If first mbuf has no cluster, and has room for len bytes
904 * without shifting current data, pullup into it,
905 * otherwise allocate a new mbuf to prepend to the chain.
906 */
907 if ((n->m_flags & M_EXT) == 0 &&
908 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
909 if (n->m_len >= len)
910 return (n);
911 m = n;
912 n = n->m_next;
913 len -= m->m_len;
914 } else {
915 if (len > MHLEN)
916 goto bad;
917 m = m_get(M_NOWAIT, n->m_type);
918 if (m == NULL)
919 goto bad;
920 if (n->m_flags & M_PKTHDR)
921 m_move_pkthdr(m, n);
922 }
923 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
924 do {
925 count = min(min(max(len, max_protohdr), space), n->m_len);
926 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
927 (u_int)count);
928 len -= count;
929 m->m_len += count;
930 n->m_len -= count;
931 space -= count;
932 if (n->m_len)
933 n->m_data += count;
934 else
935 n = m_free(n);
936 } while (len > 0 && n);
937 if (len > 0) {
938 (void) m_free(m);
939 goto bad;
940 }
941 m->m_next = n;
942 return (m);
943 bad:
944 m_freem(n);
945 return (NULL);
946 }
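
/*
 * Editor's note: the canonical m_pullup() idiom from protocol input
 * paths, recast here as a sketch; struct exhdr is a hypothetical wire
 * header, not something defined by this file.
 */
#if 0
struct exhdr { uint32_t f1, f2; };	/* hypothetical header layout */

static struct exhdr *
example_pullup_hdr(struct mbuf **mp)
{
	/* Make the header contiguous; m_pullup() frees the chain on failure. */
	if ((*mp)->m_len < (int)sizeof(struct exhdr) &&
	    (*mp = m_pullup(*mp, sizeof(struct exhdr))) == NULL)
		return (NULL);
	return (mtod(*mp, struct exhdr *));
}
#endif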
947
948 /*
949 * Like m_pullup(), except a new mbuf is always allocated, and we allow
950 * the amount of empty space before the data in the new mbuf to be specified
951 * (in the event that the caller expects to prepend later).
952 */
953 struct mbuf *
954 m_copyup(struct mbuf *n, int len, int dstoff)
955 {
956 struct mbuf *m;
957 int count, space;
958
959 if (len > (MHLEN - dstoff))
960 goto bad;
961 m = m_get(M_NOWAIT, n->m_type);
962 if (m == NULL)
963 goto bad;
964 if (n->m_flags & M_PKTHDR)
965 m_move_pkthdr(m, n);
966 m->m_data += dstoff;
967 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
968 do {
969 count = min(min(max(len, max_protohdr), space), n->m_len);
970 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
971 (unsigned)count);
972 len -= count;
973 m->m_len += count;
974 n->m_len -= count;
975 space -= count;
976 if (n->m_len)
977 n->m_data += count;
978 else
979 n = m_free(n);
980 } while (len > 0 && n);
981 if (len > 0) {
982 (void) m_free(m);
983 goto bad;
984 }
985 m->m_next = n;
986 return (m);
987 bad:
988 m_freem(n);
989 return (NULL);
990 }
991
992 /*
993 * Partition an mbuf chain in two pieces, returning the tail --
994 * all but the first len0 bytes. In case of failure, it returns NULL and
995 * attempts to restore the chain to its original state.
996 *
997 * Note that the resulting mbufs might be read-only, because the new
998 * mbuf can end up sharing an mbuf cluster with the original mbuf if
999 * the "breaking point" happens to lie within a cluster mbuf. Use the
1000 * M_WRITABLE() macro to check for this case.
1001 */
1002 struct mbuf *
1003 m_split(struct mbuf *m0, int len0, int wait)
1004 {
1005 struct mbuf *m, *n;
1006 u_int len = len0, remain;
1007
1008 MBUF_CHECKSLEEP(wait);
1009 for (m = m0; m && len > m->m_len; m = m->m_next)
1010 len -= m->m_len;
1011 if (m == NULL)
1012 return (NULL);
1013 remain = m->m_len - len;
1014 if (m0->m_flags & M_PKTHDR && remain == 0) {
1015 n = m_gethdr(wait, m0->m_type);
1016 if (n == NULL)
1017 return (NULL);
1018 n->m_next = m->m_next;
1019 m->m_next = NULL;
1020 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1021 n->m_pkthdr.snd_tag =
1022 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1023 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1024 } else
1025 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1026 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1027 m0->m_pkthdr.len = len0;
1028 return (n);
1029 } else if (m0->m_flags & M_PKTHDR) {
1030 n = m_gethdr(wait, m0->m_type);
1031 if (n == NULL)
1032 return (NULL);
1033 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1034 n->m_pkthdr.snd_tag =
1035 m_snd_tag_ref(m0->m_pkthdr.snd_tag);
1036 n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
1037 } else
1038 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1039 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1040 m0->m_pkthdr.len = len0;
1041 if (m->m_flags & (M_EXT|M_EXTPG))
1042 goto extpacket;
1043 if (remain > MHLEN) {
1044 /* m can't be the lead packet */
1045 M_ALIGN(n, 0);
1046 n->m_next = m_split(m, len, wait);
1047 if (n->m_next == NULL) {
1048 (void) m_free(n);
1049 return (NULL);
1050 } else {
1051 n->m_len = 0;
1052 return (n);
1053 }
1054 } else
1055 M_ALIGN(n, remain);
1056 } else if (remain == 0) {
1057 n = m->m_next;
1058 m->m_next = NULL;
1059 return (n);
1060 } else {
1061 n = m_get(wait, m->m_type);
1062 if (n == NULL)
1063 return (NULL);
1064 M_ALIGN(n, remain);
1065 }
1066 extpacket:
1067 if (m->m_flags & (M_EXT|M_EXTPG)) {
1068 n->m_data = m->m_data + len;
1069 mb_dupcl(n, m);
1070 } else {
1071 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1072 }
1073 n->m_len = remain;
1074 m->m_len = len;
1075 n->m_next = m->m_next;
1076 m->m_next = NULL;
1077 return (n);
1078 }
1079 /*
1080 * Routine to copy from device local memory into mbufs.
1081 * Note that `off' argument is offset into first mbuf of target chain from
1082 * which to begin copying the data to.
1083 */
1084 struct mbuf *
1085 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1086 void (*copy)(char *from, caddr_t to, u_int len))
1087 {
1088 struct mbuf *m;
1089 struct mbuf *top = NULL, **mp = &top;
1090 int len;
1091
1092 if (off < 0 || off > MHLEN)
1093 return (NULL);
1094
1095 while (totlen > 0) {
1096 if (top == NULL) { /* First one, must be PKTHDR */
1097 if (totlen + off >= MINCLSIZE) {
1098 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1099 len = MCLBYTES;
1100 } else {
1101 m = m_gethdr(M_NOWAIT, MT_DATA);
1102 len = MHLEN;
1103
1104 /* Place initial small packet/header at end of mbuf */
1105 if (m && totlen + off + max_linkhdr <= MHLEN) {
1106 m->m_data += max_linkhdr;
1107 len -= max_linkhdr;
1108 }
1109 }
1110 if (m == NULL)
1111 return NULL;
1112 m->m_pkthdr.rcvif = ifp;
1113 m->m_pkthdr.len = totlen;
1114 } else {
1115 if (totlen + off >= MINCLSIZE) {
1116 m = m_getcl(M_NOWAIT, MT_DATA, 0);
1117 len = MCLBYTES;
1118 } else {
1119 m = m_get(M_NOWAIT, MT_DATA);
1120 len = MLEN;
1121 }
1122 if (m == NULL) {
1123 m_freem(top);
1124 return NULL;
1125 }
1126 }
1127 if (off) {
1128 m->m_data += off;
1129 len -= off;
1130 off = 0;
1131 }
1132 m->m_len = len = min(totlen, len);
1133 if (copy)
1134 copy(buf, mtod(m, caddr_t), (u_int)len);
1135 else
1136 bcopy(buf, mtod(m, caddr_t), (u_int)len);
1137 buf += len;
1138 *mp = m;
1139 mp = &m->m_next;
1140 totlen -= len;
1141 }
1142 return (top);
1143 }
1144
1145 static void
1146 m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp)
1147 {
1148 struct iovec iov;
1149 struct uio uio;
1150 int error;
1151
1152 KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off));
1153 KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len));
1154 KASSERT(off < m->m_len, ("m_copytounmapped: off exceeds mbuf length"));
1155 iov.iov_base = __DECONST(caddr_t, cp);
1156 iov.iov_len = len;
1157 uio.uio_resid = len;
1158 uio.uio_iov = &iov;
1159 uio.uio_segflg = UIO_SYSSPACE;
1160 uio.uio_iovcnt = 1;
1161 uio.uio_offset = 0;
1162 uio.uio_rw = UIO_WRITE;
1163 error = m_unmapped_uiomove(m, off, &uio, len);
1164 KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
1165 len));
1166 }
1167
1168 /*
1169 * Copy data from a buffer back into the indicated mbuf chain,
1170 * starting "off" bytes from the beginning, extending the mbuf
1171 * chain if necessary.
1172 */
1173 void
1174 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1175 {
1176 int mlen;
1177 struct mbuf *m = m0, *n;
1178 int totlen = 0;
1179
1180 if (m0 == NULL)
1181 return;
1182 while (off > (mlen = m->m_len)) {
1183 off -= mlen;
1184 totlen += mlen;
1185 if (m->m_next == NULL) {
1186 n = m_get(M_NOWAIT, m->m_type);
1187 if (n == NULL)
1188 goto out;
1189 bzero(mtod(n, caddr_t), MLEN);
1190 n->m_len = min(MLEN, len + off);
1191 m->m_next = n;
1192 }
1193 m = m->m_next;
1194 }
1195 while (len > 0) {
1196 if (m->m_next == NULL && (len > m->m_len - off)) {
1197 m->m_len += min(len - (m->m_len - off),
1198 M_TRAILINGSPACE(m));
1199 }
1200 mlen = min(m->m_len - off, len);
1201 if ((m->m_flags & M_EXTPG) != 0)
1202 m_copytounmapped(m, off, mlen, cp);
1203 else
1204 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1205 cp += mlen;
1206 len -= mlen;
1207 mlen += off;
1208 off = 0;
1209 totlen += mlen;
1210 if (len == 0)
1211 break;
1212 if (m->m_next == NULL) {
1213 n = m_get(M_NOWAIT, m->m_type);
1214 if (n == NULL)
1215 break;
1216 n->m_len = min(MLEN, len);
1217 m->m_next = n;
1218 }
1219 m = m->m_next;
1220 }
1221 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1222 m->m_pkthdr.len = totlen;
1223 }
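
/*
 * Editor's note: sketch only (not original source).  m_copyback() cannot
 * report an allocation failure, so writes past what it could extend are
 * silently dropped; keep patches inside the existing chain when possible.
 */
#if 0
static void
example_patch_u16(struct mbuf *m, int off, uint16_t v)
{
	m_copyback(m, off, sizeof(v), (c_caddr_t)&v);
}
#endif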
1224
1225 /*
1226 * Append the specified data to the indicated mbuf chain,
1227 * Extend the mbuf chain if the new data does not fit in
1228 * existing space.
1229 *
1230 * Return 1 if able to complete the job; otherwise 0.
1231 */
1232 int
1233 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1234 {
1235 struct mbuf *m, *n;
1236 int remainder, space;
1237
1238 for (m = m0; m->m_next != NULL; m = m->m_next)
1239 ;
1240 remainder = len;
1241 space = M_TRAILINGSPACE(m);
1242 if (space > 0) {
1243 /*
1244 * Copy into available space.
1245 */
1246 if (space > remainder)
1247 space = remainder;
1248 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1249 m->m_len += space;
1250 cp += space, remainder -= space;
1251 }
1252 while (remainder > 0) {
1253 /*
1254 * Allocate a new mbuf; could check space
1255 * and allocate a cluster instead.
1256 */
1257 n = m_get(M_NOWAIT, m->m_type);
1258 if (n == NULL)
1259 break;
1260 n->m_len = min(MLEN, remainder);
1261 bcopy(cp, mtod(n, caddr_t), n->m_len);
1262 cp += n->m_len, remainder -= n->m_len;
1263 m->m_next = n;
1264 m = n;
1265 }
1266 if (m0->m_flags & M_PKTHDR)
1267 m0->m_pkthdr.len += len - remainder;
1268 return (remainder == 0);
1269 }
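
/*
 * Editor's note: a hedged sketch of m_append(); mapping failure to
 * ENOBUFS is the editor's convention, not something this file prescribes.
 */
#if 0
static int
example_add_trailer(struct mbuf *m, const void *p, int len)
{
	/* m_append() returns 1 on success and 0 on allocation failure. */
	return (m_append(m, len, (c_caddr_t)p) ? 0 : ENOBUFS);
}
#endif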
1270
1271 static int
1272 m_apply_extpg_one(struct mbuf *m, int off, int len,
1273 int (*f)(void *, void *, u_int), void *arg)
1274 {
1275 void *p;
1276 u_int i, count, pgoff, pglen;
1277 int rval;
1278
1279 KASSERT(PMAP_HAS_DMAP,
1280 ("m_apply_extpg_one does not support unmapped mbufs"));
1281 off += mtod(m, vm_offset_t);
1282 if (off < m->m_epg_hdrlen) {
1283 count = min(m->m_epg_hdrlen - off, len);
1284 rval = f(arg, m->m_epg_hdr + off, count);
1285 if (rval)
1286 return (rval);
1287 len -= count;
1288 off = 0;
1289 } else
1290 off -= m->m_epg_hdrlen;
1291 pgoff = m->m_epg_1st_off;
1292 for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
1293 pglen = m_epg_pagelen(m, i, pgoff);
1294 if (off < pglen) {
1295 count = min(pglen - off, len);
1296 p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff);
1297 rval = f(arg, p, count);
1298 if (rval)
1299 return (rval);
1300 len -= count;
1301 off = 0;
1302 } else
1303 off -= pglen;
1304 pgoff = 0;
1305 }
1306 if (len > 0) {
1307 KASSERT(off < m->m_epg_trllen,
1308 ("m_apply_extpg_one: offset beyond trailer"));
1309 KASSERT(len <= m->m_epg_trllen - off,
1310 ("m_apply_extpg_one: length beyond trailer"));
1311 return (f(arg, m->m_epg_trail + off, len));
1312 }
1313 return (0);
1314 }
1315
1316 /* Apply function f to the data in a single mbuf. */
1317 static int
1318 m_apply_one(struct mbuf *m, int off, int len,
1319 int (*f)(void *, void *, u_int), void *arg)
1320 {
1321 if ((m->m_flags & M_EXTPG) != 0)
1322 return (m_apply_extpg_one(m, off, len, f, arg));
1323 else
1324 return (f(arg, mtod(m, caddr_t) + off, len));
1325 }
1326
1327 /*
1328 * Apply function f to the data in an mbuf chain starting "off" bytes from
1329 * the beginning, continuing for "len" bytes.
1330 */
1331 int
1332 m_apply(struct mbuf *m, int off, int len,
1333 int (*f)(void *, void *, u_int), void *arg)
1334 {
1335 u_int count;
1336 int rval;
1337
1338 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1339 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1340 while (off > 0) {
1341 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1342 if (off < m->m_len)
1343 break;
1344 off -= m->m_len;
1345 m = m->m_next;
1346 }
1347 while (len > 0) {
1348 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1349 count = min(m->m_len - off, len);
1350 rval = m_apply_one(m, off, count, f, arg);
1351 if (rval)
1352 return (rval);
1353 len -= count;
1354 off = 0;
1355 m = m->m_next;
1356 }
1357 return (0);
1358 }
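
/*
 * Editor's note: an illustrative m_apply() callback (not original
 * source) that folds a region into an XOR accumulator without
 * linearizing the chain; on direct-map platforms this also walks
 * M_EXTPG mbufs via m_apply_extpg_one() above.
 */
#if 0
static int
example_xor_cb(void *arg, void *data, u_int len)
{
	uint8_t *acc = arg;
	const uint8_t *p = data;

	while (len-- > 0)
		*acc ^= *p++;
	return (0);		/* non-zero would abort the walk */
}

static uint8_t
example_xor_region(struct mbuf *m, int off, int len)
{
	uint8_t acc = 0;

	(void)m_apply(m, off, len, example_xor_cb, &acc);
	return (acc);
}
#endif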
1359
1360 /*
1361 * Return a pointer to mbuf/offset of location in mbuf chain.
1362 */
1363 struct mbuf *
1364 m_getptr(struct mbuf *m, int loc, int *off)
1365 {
1366
1367 while (loc >= 0) {
1368 /* Normal end of search. */
1369 if (m->m_len > loc) {
1370 *off = loc;
1371 return (m);
1372 } else {
1373 loc -= m->m_len;
1374 if (m->m_next == NULL) {
1375 if (loc == 0) {
1376 /* Point at the end of valid data. */
1377 *off = m->m_len;
1378 return (m);
1379 }
1380 return (NULL);
1381 }
1382 m = m->m_next;
1383 }
1384 }
1385 return (NULL);
1386 }
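
/*
 * Editor's note: sketch (not original source) showing how to consume
 * m_getptr()'s mbuf/offset pair, including the end-of-data case where
 * *off == m_len.
 */
#if 0
static int
example_byte_at(struct mbuf *m, int loc, uint8_t *val)
{
	int off;

	m = m_getptr(m, loc, &off);
	if (m == NULL || off == m->m_len)
		return (EINVAL);	/* past the last valid byte */
	*val = mtod(m, uint8_t *)[off];
	return (0);
}
#endif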
1387
1388 void
1389 m_print(const struct mbuf *m, int maxlen)
1390 {
1391 int len;
1392 int pdata;
1393 const struct mbuf *m2;
1394
1395 if (m == NULL) {
1396 printf("mbuf: %p\n", m);
1397 return;
1398 }
1399
1400 if (m->m_flags & M_PKTHDR)
1401 len = m->m_pkthdr.len;
1402 else
1403 len = -1;
1404 m2 = m;
1405 while (m2 != NULL && (len == -1 || len)) {
1406 pdata = m2->m_len;
1407 if (maxlen != -1 && pdata > maxlen)
1408 pdata = maxlen;
1409 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1410 m2->m_next, m2->m_flags, "\2\20freelist\17skipfw"
1411 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1412 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1413 if (pdata)
1414 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1415 if (len != -1)
1416 len -= m2->m_len;
1417 m2 = m2->m_next;
1418 }
1419 if (len > 0)
1420 printf("%d bytes unaccounted for.\n", len);
1421 return;
1422 }
1423
1424 u_int
1425 m_fixhdr(struct mbuf *m0)
1426 {
1427 u_int len;
1428
1429 len = m_length(m0, NULL);
1430 m0->m_pkthdr.len = len;
1431 return (len);
1432 }
1433
1434 u_int
1435 m_length(struct mbuf *m0, struct mbuf **last)
1436 {
1437 struct mbuf *m;
1438 u_int len;
1439
1440 len = 0;
1441 for (m = m0; m != NULL; m = m->m_next) {
1442 len += m->m_len;
1443 if (m->m_next == NULL)
1444 break;
1445 }
1446 if (last != NULL)
1447 *last = m;
1448 return (len);
1449 }
1450
1451 /*
1452 * Defragment an mbuf chain, returning the shortest possible
1453 * chain of mbufs and clusters. If allocation fails and
1454 * this cannot be completed, NULL will be returned, but
1455 * the passed in chain will be unchanged. Upon success,
1456 * the original chain will be freed, and the new chain
1457 * will be returned.
1458 *
1459 * If a non-packet-header mbuf is passed in, the original
1460 * mbuf (chain) will be returned unharmed.
1461 */
1462 struct mbuf *
1463 m_defrag(struct mbuf *m0, int how)
1464 {
1465 struct mbuf *m_new = NULL, *m_final = NULL;
1466 int progress = 0, length;
1467
1468 MBUF_CHECKSLEEP(how);
1469 if (!(m0->m_flags & M_PKTHDR))
1470 return (m0);
1471
1472 m_fixhdr(m0); /* Needed sanity check */
1473
1474 #ifdef MBUF_STRESS_TEST
1475 if (m_defragrandomfailures) {
1476 int temp = arc4random() & 0xff;
1477 if (temp == 0xba)
1478 goto nospace;
1479 }
1480 #endif
1481
1482 if (m0->m_pkthdr.len > MHLEN)
1483 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1484 else
1485 m_final = m_gethdr(how, MT_DATA);
1486
1487 if (m_final == NULL)
1488 goto nospace;
1489
1490 if (m_dup_pkthdr(m_final, m0, how) == 0)
1491 goto nospace;
1492
1493 m_new = m_final;
1494
1495 while (progress < m0->m_pkthdr.len) {
1496 length = m0->m_pkthdr.len - progress;
1497 if (length > MCLBYTES)
1498 length = MCLBYTES;
1499
1500 if (m_new == NULL) {
1501 if (length > MLEN)
1502 m_new = m_getcl(how, MT_DATA, 0);
1503 else
1504 m_new = m_get(how, MT_DATA);
1505 if (m_new == NULL)
1506 goto nospace;
1507 }
1508
1509 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1510 progress += length;
1511 m_new->m_len = length;
1512 if (m_new != m_final)
1513 m_cat(m_final, m_new);
1514 m_new = NULL;
1515 }
1516 #ifdef MBUF_STRESS_TEST
1517 if (m0->m_next == NULL)
1518 m_defraguseless++;
1519 #endif
1520 m_freem(m0);
1521 m0 = m_final;
1522 #ifdef MBUF_STRESS_TEST
1523 m_defragpackets++;
1524 m_defragbytes += m0->m_pkthdr.len;
1525 #endif
1526 return (m0);
1527 nospace:
1528 #ifdef MBUF_STRESS_TEST
1529 m_defragfailure++;
1530 #endif
1531 if (m_final)
1532 m_freem(m_final);
1533 return (NULL);
1534 }
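
/*
 * Editor's note: a transmit-path sketch, not original source; the
 * segment limit EXAMPLE_NSEG stands in for a driver's real DMA limit.
 * On failure m_defrag() leaves the input chain intact, so the caller
 * decides whether to free or retry.
 */
#if 0
#define	EXAMPLE_NSEG	8	/* hypothetical DMA segment limit */

static struct mbuf *
example_fit_for_dma(struct mbuf *m)
{
	struct mbuf *n;
	int frags;

	frags = 0;
	for (n = m; n != NULL; n = n->m_next)
		frags++;
	if (frags <= EXAMPLE_NSEG)
		return (m);
	n = m_defrag(m, M_NOWAIT);
	if (n == NULL)
		m_freem(m);	/* editor's policy: drop the packet */
	return (n);
}
#endif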
1535
1536 /*
1537 * Return the number of fragments an mbuf will use. This is usually
1538 * used as a proxy for the number of scatter/gather elements needed by
1539 * a DMA engine to access an mbuf. In general mapped mbufs are
1540 * assumed to be backed by physically contiguous buffers that only
1541 * need a single fragment. Unmapped mbufs, on the other hand, can
1542 * span disjoint physical pages.
1543 */
1544 static int
1545 frags_per_mbuf(struct mbuf *m)
1546 {
1547 int frags;
1548
1549 if ((m->m_flags & M_EXTPG) == 0)
1550 return (1);
1551
1552 /*
1553 * The header and trailer are counted as a single fragment
1554 * each when present.
1555 *
1556 * XXX: This overestimates the number of fragments by assuming
1557 * all the backing physical pages are disjoint.
1558 */
1559 frags = 0;
1560 if (m->m_epg_hdrlen != 0)
1561 frags++;
1562 frags += m->m_epg_npgs;
1563 if (m->m_epg_trllen != 0)
1564 frags++;
1565
1566 return (frags);
1567 }
1568
1569 /*
1570 * Defragment an mbuf chain, returning at most maxfrags separate
1571 * mbufs+clusters. If this is not possible NULL is returned and
1572 * the original mbuf chain is left in its present (potentially
1573 * modified) state. We use two techniques: collapsing consecutive
1574 * mbufs and replacing consecutive mbufs by a cluster.
1575 *
1576 * NB: this should really be named m_defrag but that name is taken
1577 */
1578 struct mbuf *
1579 m_collapse(struct mbuf *m0, int how, int maxfrags)
1580 {
1581 struct mbuf *m, *n, *n2, **prev;
1582 u_int curfrags;
1583
1584 /*
1585 * Calculate the current number of frags.
1586 */
1587 curfrags = 0;
1588 for (m = m0; m != NULL; m = m->m_next)
1589 curfrags += frags_per_mbuf(m);
1590 /*
1591 * First, try to collapse mbufs. Note that we always collapse
1592 * towards the front so we don't need to deal with moving the
1593 * pkthdr. This may be suboptimal if the first mbuf has much
1594 * less data than the following.
1595 */
1596 m = m0;
1597 again:
1598 for (;;) {
1599 n = m->m_next;
1600 if (n == NULL)
1601 break;
1602 if (M_WRITABLE(m) &&
1603 n->m_len < M_TRAILINGSPACE(m)) {
1604 m_copydata(n, 0, n->m_len,
1605 mtod(m, char *) + m->m_len);
1606 m->m_len += n->m_len;
1607 m->m_next = n->m_next;
1608 curfrags -= frags_per_mbuf(n);
1609 m_free(n);
1610 if (curfrags <= maxfrags)
1611 return m0;
1612 } else
1613 m = n;
1614 }
1615 KASSERT(maxfrags > 1,
1616 ("maxfrags %u, but normal collapse failed", maxfrags));
1617 /*
1618 * Collapse consecutive mbufs to a cluster.
1619 */
1620 prev = &m0->m_next; /* NB: not the first mbuf */
1621 while ((n = *prev) != NULL) {
1622 if ((n2 = n->m_next) != NULL &&
1623 n->m_len + n2->m_len < MCLBYTES) {
1624 m = m_getcl(how, MT_DATA, 0);
1625 if (m == NULL)
1626 goto bad;
1627 m_copydata(n, 0, n->m_len, mtod(m, char *));
1628 m_copydata(n2, 0, n2->m_len,
1629 mtod(m, char *) + n->m_len);
1630 m->m_len = n->m_len + n2->m_len;
1631 m->m_next = n2->m_next;
1632 *prev = m;
1633 curfrags += 1; /* For the new cluster */
1634 curfrags -= frags_per_mbuf(n);
1635 curfrags -= frags_per_mbuf(n2);
1636 m_free(n);
1637 m_free(n2);
1638 if (curfrags <= maxfrags)
1639 return m0;
1640 /*
1641 * Still not there, try the normal collapse
1642 * again before we allocate another cluster.
1643 */
1644 goto again;
1645 }
1646 prev = &n->m_next;
1647 }
1648 /*
1649 * No place where we can collapse to a cluster; punt.
1650 * This can occur if, for example, you request 2 frags
1651 * but the packet requires that both be clusters (we
1652 * never reallocate the first mbuf to avoid moving the
1653 * packet header).
1654 */
1655 bad:
1656 return NULL;
1657 }
1658
1659 #ifdef MBUF_STRESS_TEST
1660
1661 /*
1662 * Fragment an mbuf chain. There's no reason you'd ever want to do
1663 * this in normal usage, but it's great for stress testing various
1664 * mbuf consumers.
1665 *
1666 * If fragmentation is not possible, the original chain will be
1667 * returned.
1668 *
1669 * Possible length values:
1670 * 0 no fragmentation will occur
1671 * > 0 each fragment will be of the specified length
1672 * -1 each fragment will be the same random value in length
1673 * -2 each fragment's length will be entirely random
1674 * (Random values range from 1 to 256)
1675 */
1676 struct mbuf *
1677 m_fragment(struct mbuf *m0, int how, int length)
1678 {
1679 struct mbuf *m_first, *m_last;
1680 int divisor = 255, progress = 0, fraglen;
1681
1682 if (!(m0->m_flags & M_PKTHDR))
1683 return (m0);
1684
1685 if (length == 0 || length < -2)
1686 return (m0);
1687 if (length > MCLBYTES)
1688 length = MCLBYTES;
1689 if (length < 0 && divisor > MCLBYTES)
1690 divisor = MCLBYTES;
1691 if (length == -1)
1692 length = 1 + (arc4random() % divisor);
1693 if (length > 0)
1694 fraglen = length;
1695
1696 m_fixhdr(m0); /* Needed sanity check */
1697
1698 m_first = m_getcl(how, MT_DATA, M_PKTHDR);
1699 if (m_first == NULL)
1700 goto nospace;
1701
1702 if (m_dup_pkthdr(m_first, m0, how) == 0)
1703 goto nospace;
1704
1705 m_last = m_first;
1706
1707 while (progress < m0->m_pkthdr.len) {
1708 if (length == -2)
1709 fraglen = 1 + (arc4random() % divisor);
1710 if (fraglen > m0->m_pkthdr.len - progress)
1711 fraglen = m0->m_pkthdr.len - progress;
1712
1713 if (progress != 0) {
1714 struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
1715 if (m_new == NULL)
1716 goto nospace;
1717
1718 m_last->m_next = m_new;
1719 m_last = m_new;
1720 }
1721
1722 m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
1723 progress += fraglen;
1724 m_last->m_len = fraglen;
1725 }
1726 m_freem(m0);
1727 m0 = m_first;
1728 return (m0);
1729 nospace:
1730 if (m_first)
1731 m_freem(m_first);
1732 /* Return the original chain on failure */
1733 return (m0);
1734 }
1735
1736 #endif
1737
1738 /*
1739 * Free pages from mbuf_ext_pgs, assuming they were allocated via
1740 * vm_page_alloc() and aren't associated with any object. Complement
1741 * to allocator from m_uiotombuf_nomap().
1742 */
1743 void
1744 mb_free_mext_pgs(struct mbuf *m)
1745 {
1746 vm_page_t pg;
1747
1748 M_ASSERTEXTPG(m);
1749 for (int i = 0; i < m->m_epg_npgs; i++) {
1750 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1751 vm_page_unwire_noq(pg);
1752 vm_page_free(pg);
1753 }
1754 }
1755
1756 static struct mbuf *
1757 m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
1758 {
1759 struct mbuf *m, *mb, *prev;
1760 vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
1761 int error, length, i, needed;
1762 ssize_t total;
1763 int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;
1764
1765 MPASS((flags & M_PKTHDR) == 0);
1766 MPASS((how & M_ZERO) == 0);
1767
1768 /*
1769 * len can be zero or an arbitrary large value bound by
1770 * the total data supplied by the uio.
1771 */
1772 if (len > 0)
1773 total = MIN(uio->uio_resid, len);
1774 else
1775 total = uio->uio_resid;
1776
1777 if (maxseg == 0)
1778 maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;
1779
1780 /*
1781 * If total is zero, return an empty mbuf. This can occur
1782 * for TLS 1.0 connections which send empty fragments as
1783 * a countermeasure against the known-IV weakness in CBC
1784 * ciphersuites.
1785 */
1786 if (__predict_false(total == 0)) {
1787 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1788 if (mb == NULL)
1789 return (NULL);
1790 mb->m_epg_flags = EPG_FLAG_ANON;
1791 return (mb);
1792 }
1793
1794 /*
1795 * Allocate the pages
1796 */
1797 m = NULL;
1798 while (total > 0) {
1799 mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
1800 if (mb == NULL)
1801 goto failed;
1802 if (m == NULL)
1803 m = mb;
1804 else
1805 prev->m_next = mb;
1806 prev = mb;
1807 mb->m_epg_flags = EPG_FLAG_ANON;
1808 needed = length = MIN(maxseg, total);
1809 for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
1810 retry_page:
1811 pg_array[i] = vm_page_alloc_noobj(pflags);
1812 if (pg_array[i] == NULL) {
1813 if (how & M_NOWAIT) {
1814 goto failed;
1815 } else {
1816 vm_wait(NULL);
1817 goto retry_page;
1818 }
1819 }
1820 mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
1821 mb->m_epg_npgs++;
1822 }
1823 mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
1824 MBUF_EXT_PGS_ASSERT_SANITY(mb);
1825 total -= length;
1826 error = uiomove_fromphys(pg_array, 0, length, uio);
1827 if (error != 0)
1828 goto failed;
1829 mb->m_len = length;
1830 mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
1831 if (flags & M_PKTHDR)
1832 m->m_pkthdr.len += length;
1833 }
1834 return (m);
1835
1836 failed:
1837 m_freem(m);
1838 return (NULL);
1839 }
1840
1841 /*
1842 * Copy the contents of uio into a properly sized mbuf chain.
1843 */
1844 struct mbuf *
1845 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1846 {
1847 struct mbuf *m, *mb;
1848 int error, length;
1849 ssize_t total;
1850 int progress = 0;
1851
1852 if (flags & M_EXTPG)
1853 return (m_uiotombuf_nomap(uio, how, len, align, flags));
1854
1855 /*
1856 * len can be zero or an arbitrary large value bound by
1857 * the total data supplied by the uio.
1858 */
1859 if (len > 0)
1860 total = (uio->uio_resid < len) ? uio->uio_resid : len;
1861 else
1862 total = uio->uio_resid;
1863
1864 /*
1865 * The smallest unit returned by m_getm2() is a single mbuf
1866 * with pkthdr. We can't align past it.
1867 */
1868 if (align >= MHLEN)
1869 return (NULL);
1870
1871 /*
1872 * Give us the full allocation or nothing.
1873 * If len is zero return the smallest empty mbuf.
1874 */
1875 m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1876 if (m == NULL)
1877 return (NULL);
1878 m->m_data += align;
1879
1880 /* Fill all mbufs with uio data and update header information. */
1881 for (mb = m; mb != NULL; mb = mb->m_next) {
1882 length = min(M_TRAILINGSPACE(mb), total - progress);
1883
1884 error = uiomove(mtod(mb, void *), length, uio);
1885 if (error) {
1886 m_freem(m);
1887 return (NULL);
1888 }
1889
1890 mb->m_len = length;
1891 progress += length;
1892 if (flags & M_PKTHDR)
1893 m->m_pkthdr.len += length;
1894 }
1895 KASSERT(progress == total, ("%s: progress != total", __func__));
1896
1897 return (m);
1898 }
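
/*
 * Editor's note: sketch only.  Copy a uio into a fresh packet, leaving
 * 16 bytes (an assumed link-header reserve) in front; align must stay
 * below MHLEN, as the function above enforces.
 */
#if 0
static struct mbuf *
example_uio_to_pkt(struct uio *uio)
{
	/* len == 0 means "take everything uio_resid offers". */
	return (m_uiotombuf(uio, M_WAITOK, 0, 16, M_PKTHDR));
}
#endif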
1899
1900 /*
1901 * Copy data to/from an unmapped mbuf into a uio limited by len if set.
1902 */
1903 int
1904 m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len)
1905 {
1906 vm_page_t pg;
1907 int error, i, off, pglen, pgoff, seglen, segoff;
1908
1909 M_ASSERTEXTPG(m);
1910 error = 0;
1911
1912 /* Skip over any data removed from the front. */
1913 off = mtod(m, vm_offset_t);
1914
1915 off += m_off;
1916 if (m->m_epg_hdrlen != 0) {
1917 if (off >= m->m_epg_hdrlen) {
1918 off -= m->m_epg_hdrlen;
1919 } else {
1920 seglen = m->m_epg_hdrlen - off;
1921 segoff = off;
1922 seglen = min(seglen, len);
1923 off = 0;
1924 len -= seglen;
1925 error = uiomove(__DECONST(void *,
1926 &m->m_epg_hdr[segoff]), seglen, uio);
1927 }
1928 }
1929 pgoff = m->m_epg_1st_off;
1930 for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
1931 pglen = m_epg_pagelen(m, i, pgoff);
1932 if (off >= pglen) {
1933 off -= pglen;
1934 pgoff = 0;
1935 continue;
1936 }
1937 seglen = pglen - off;
1938 segoff = pgoff + off;
1939 off = 0;
1940 seglen = min(seglen, len);
1941 len -= seglen;
1942 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1943 error = uiomove_fromphys(&pg, segoff, seglen, uio);
1944 pgoff = 0;
1945 }
1946 if (len != 0 && error == 0) {
1947 KASSERT((off + len) <= m->m_epg_trllen,
1948 ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
1949 m->m_epg_trllen, m_off));
1950 error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
1951 len, uio);
1952 }
1953 return (error);
1954 }
1955
1956 /*
1957 * Copy an mbuf chain into a uio limited by len if set.
1958 */
1959 int
1960 m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
1961 {
1962 int error, length, total;
1963 int progress = 0;
1964
1965 if (len > 0)
1966 total = min(uio->uio_resid, len);
1967 else
1968 total = uio->uio_resid;
1969
1970 /* Fill the uio with data from the mbufs. */
1971 for (; m != NULL; m = m->m_next) {
1972 length = min(m->m_len, total - progress);
1973
1974 if ((m->m_flags & M_EXTPG) != 0)
1975 error = m_unmapped_uiomove(m, 0, uio, length);
1976 else
1977 error = uiomove(mtod(m, void *), length, uio);
1978 if (error)
1979 return (error);
1980
1981 progress += length;
1982 }
1983
1984 return (0);
1985 }
1986
1987 /*
1988 * Create a writable copy of the mbuf chain. While doing this
1989 * we compact the chain with a goal of producing a chain with
1990 * at most two mbufs. The second mbuf in this chain is likely
1991 * to be a cluster. The primary purpose of this work is to create
1992 * a writable packet for encryption, compression, etc. The
1993 * secondary goal is to linearize the data so the data can be
1994 * passed to crypto hardware in the most efficient manner possible.
1995 */
1996 struct mbuf *
1997 m_unshare(struct mbuf *m0, int how)
1998 {
1999 struct mbuf *m, *mprev;
2000 struct mbuf *n, *mfirst, *mlast;
2001 int len, off;
2002
2003 mprev = NULL;
2004 for (m = m0; m != NULL; m = mprev->m_next) {
2005 /*
2006 * Regular mbufs are ignored unless there's a cluster
2007 * in front of it that we can use to coalesce. We do
2008 * the latter mainly so later clusters can be coalesced
2009 * also w/o having to handle them specially (i.e. convert
2010 * mbuf+cluster -> cluster). This optimization is heavily
2011 * influenced by the assumption that we're running over
2012 * Ethernet where MCLBYTES is large enough that the max
2013 * packet size will permit lots of coalescing into a
2014 * single cluster. This in turn permits efficient
2015 * crypto operations, especially when using hardware.
2016 */
2017 if ((m->m_flags & M_EXT) == 0) {
2018 if (mprev && (mprev->m_flags & M_EXT) &&
2019 m->m_len <= M_TRAILINGSPACE(mprev)) {
2020 /* XXX: this ignores mbuf types */
2021 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
2022 mtod(m, caddr_t), m->m_len);
2023 mprev->m_len += m->m_len;
2024 mprev->m_next = m->m_next; /* unlink from chain */
2025 m_free(m); /* reclaim mbuf */
2026 } else {
2027 mprev = m;
2028 }
2029 continue;
2030 }
2031 /*
2032 * Writable mbufs are left alone (for now).
2033 */
2034 if (M_WRITABLE(m)) {
2035 mprev = m;
2036 continue;
2037 }
2038
2039 /*
2040 * Not writable, replace with a copy or coalesce with
2041 * the previous mbuf if possible (since we have to copy
2042 * it anyway, we try to reduce the number of mbufs and
2043 * clusters so that future work is easier).
2044 */
2045 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
2046 /* NB: we only coalesce into a cluster or larger */
2047 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
2048 m->m_len <= M_TRAILINGSPACE(mprev)) {
2049 /* XXX: this ignores mbuf types */
2050 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
2051 mtod(m, caddr_t), m->m_len);
2052 mprev->m_len += m->m_len;
2053 mprev->m_next = m->m_next; /* unlink from chain */
2054 m_free(m); /* reclaim mbuf */
2055 continue;
2056 }
2057
2058 /*
2059 * Allocate new space to hold the copy and copy the data.
2060 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
2061 * splitting them into clusters. We could just malloc a
2062 * buffer and make it external but too many device drivers
2063 * don't know how to break up the non-contiguous memory when
2064 * doing DMA.
2065 */
2066 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
2067 if (n == NULL) {
2068 m_freem(m0);
2069 return (NULL);
2070 }
2071 if (m->m_flags & M_PKTHDR) {
2072 KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
2073 __func__, m0, m));
2074 m_move_pkthdr(n, m);
2075 }
2076 len = m->m_len;
2077 off = 0;
2078 mfirst = n;
2079 mlast = NULL;
2080 for (;;) {
2081 int cc = min(len, MCLBYTES);
2082 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
2083 n->m_len = cc;
2084 if (mlast != NULL)
2085 mlast->m_next = n;
2086 mlast = n;
2087 #if 0
2088 newipsecstat.ips_clcopied++;
2089 #endif
2090
2091 len -= cc;
2092 if (len <= 0)
2093 break;
2094 off += cc;
2095
2096 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
2097 if (n == NULL) {
2098 m_freem(mfirst);
2099 m_freem(m0);
2100 return (NULL);
2101 }
2102 }
2103 n->m_next = m->m_next;
2104 if (mprev == NULL)
2105 m0 = mfirst; /* new head of chain */
2106 else
2107 mprev->m_next = mfirst; /* replace old mbuf */
2108 m_free(m); /* release old mbuf */
2109 mprev = mfirst;
2110 }
2111 return (m0);
2112 }
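
/*
 * Editor's note: sketch of the usual m_unshare() call site (e.g. before
 * in-place encryption); not original source.  The whole chain is
 * consumed and replaced, and is freed internally on failure.
 */
#if 0
static struct mbuf *
example_writable_for_crypto(struct mbuf *m)
{
	return (m_unshare(m, M_NOWAIT));	/* NULL means m is gone */
}
#endif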
2113
2114 #ifdef MBUF_PROFILING
2115
2116 #define MP_BUCKETS 32 /* don't just change this as things may overflow. */
2117 struct mbufprofile {
2118 uintmax_t wasted[MP_BUCKETS];
2119 uintmax_t used[MP_BUCKETS];
2120 uintmax_t segments[MP_BUCKETS];
2121 } mbprof;
2122
2123 void
2124 m_profile(struct mbuf *m)
2125 {
2126 int segments = 0;
2127 int used = 0;
2128 int wasted = 0;
2129
2130 while (m) {
2131 segments++;
2132 used += m->m_len;
2133 if (m->m_flags & M_EXT) {
2134 wasted += MHLEN - sizeof(m->m_ext) +
2135 m->m_ext.ext_size - m->m_len;
2136 } else {
2137 if (m->m_flags & M_PKTHDR)
2138 wasted += MHLEN - m->m_len;
2139 else
2140 wasted += MLEN - m->m_len;
2141 }
2142 m = m->m_next;
2143 }
2144 /* be paranoid.. it helps */
2145 if (segments > MP_BUCKETS - 1)
2146 segments = MP_BUCKETS - 1;
2147 if (used > 100000)
2148 used = 100000;
2149 if (wasted > 100000)
2150 wasted = 100000;
2151 /* store in the appropriate bucket */
2152 /* don't bother locking. if it's slightly off, so what? */
2153 mbprof.segments[segments]++;
2154 mbprof.used[fls(used)]++;
2155 mbprof.wasted[fls(wasted)]++;
2156 }
2157
2158 static int
2159 mbprof_handler(SYSCTL_HANDLER_ARGS)
2160 {
2161 char buf[256];
2162 struct sbuf sb;
2163 int error;
2164 uint64_t *p;
2165
2166 sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
2167
2168 p = &mbprof.wasted[0];
2169 sbuf_printf(&sb,
2170 "wasted:\n"
2171 "%ju %ju %ju %ju %ju %ju %ju %ju "
2172 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2173 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2174 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2175 #ifdef BIG_ARRAY
2176 p = &mbprof.wasted[16];
2177 sbuf_printf(&sb,
2178 "%ju %ju %ju %ju %ju %ju %ju %ju "
2179 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2180 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2181 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2182 #endif
2183 p = &mbprof.used[0];
2184 sbuf_printf(&sb,
2185 "used:\n"
2186 "%ju %ju %ju %ju %ju %ju %ju %ju "
2187 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2188 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2189 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2190 #ifdef BIG_ARRAY
2191 p = &mbprof.used[16];
2192 sbuf_printf(&sb,
2193 "%ju %ju %ju %ju %ju %ju %ju %ju "
2194 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2195 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2196 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2197 #endif
2198 p = &mbprof.segments[0];
2199 sbuf_printf(&sb,
2200 "segments:\n"
2201 "%ju %ju %ju %ju %ju %ju %ju %ju "
2202 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2203 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2204 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2205 #ifdef BIG_ARRAY
2206 p = &mbprof.segments[16];
2207 sbuf_printf(&sb,
2208 "%ju %ju %ju %ju %ju %ju %ju %ju "
2209 "%ju %ju %ju %ju %ju %ju %ju %jju",
2210 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2211 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2212 #endif
2213
2214 error = sbuf_finish(&sb);
2215 sbuf_delete(&sb);
2216 return (error);
2217 }
2218
2219 static int
2220 mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
2221 {
2222 int clear, error;
2223
2224 clear = 0;
2225 error = sysctl_handle_int(oidp, &clear, 0, req);
2226 if (error || !req->newptr)
2227 return (error);
2228
2229 if (clear) {
2230 bzero(&mbprof, sizeof(mbprof));
2231 }
2232
2233 return (error);
2234 }
2235
2236 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
2237 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
2238 mbprof_handler, "A",
2239 "mbuf profiling statistics");
2240
2241 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
2242 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
2243 mbprof_clr_handler, "I",
2244 "clear mbuf profiling statistics");
2245 #endif