FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c
1 /* $NetBSD: uipc_mbuf.c,v 1.116 2006/11/01 10:17:59 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1982, 1986, 1988, 1991, 1993
42 * The Regents of the University of California. All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.116 2006/11/01 10:17:59 yamt Exp $");
73
74 #include "opt_mbuftrace.h"
75 #include "opt_ddb.h"
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/malloc.h>
81 #define MBTYPES
82 #include <sys/mbuf.h>
83 #include <sys/kernel.h>
84 #include <sys/syslog.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/pool.h>
88 #include <sys/socket.h>
89 #include <sys/sysctl.h>
90
91 #include <net/if.h>
92
93 #include <uvm/uvm.h>
94
95
96 struct pool mbpool; /* mbuf pool */
97 struct pool mclpool; /* mbuf cluster pool */
98
99 struct pool_cache mbpool_cache;
100 struct pool_cache mclpool_cache;
101
102 struct mbstat mbstat;
103 int max_linkhdr;
104 int max_protohdr;
105 int max_hdr;
106 int max_datalen;
107
108 static int mb_ctor(void *, void *, int);
109
110 static void *mclpool_alloc(struct pool *, int);
111 static void mclpool_release(struct pool *, void *);
112
113 static struct pool_allocator mclpool_allocator = {
114 .pa_alloc = mclpool_alloc,
115 .pa_free = mclpool_release,
116 };
117
118 static struct mbuf *m_copym0(struct mbuf *, int, int, int, int);
119 static struct mbuf *m_split0(struct mbuf *, int, int, int);
120 static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
121
122 /* flags for m_copyback0 */
123 #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */
124 #define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */
125 #define M_COPYBACK0_COW 0x0004 /* do copy-on-write */
126 #define M_COPYBACK0_EXTEND 0x0008 /* extend chain */
127
128 static const char mclpool_warnmsg[] =
129 "WARNING: mclpool limit reached; increase NMBCLUSTERS";
130
131 MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
132
133 #ifdef MBUFTRACE
134 struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners);
135 struct mowner unknown_mowners[] = {
136 MOWNER_INIT("unknown", "free"),
137 MOWNER_INIT("unknown", "data"),
138 MOWNER_INIT("unknown", "header"),
139 MOWNER_INIT("unknown", "soname"),
140 MOWNER_INIT("unknown", "soopts"),
141 MOWNER_INIT("unknown", "ftable"),
142 MOWNER_INIT("unknown", "control"),
143 MOWNER_INIT("unknown", "oobdata"),
144 };
145 struct mowner revoked_mowner = MOWNER_INIT("revoked", "");
146 #endif
147
148 /*
149 * Initialize the mbuf allocator.
150 */
151 void
152 mbinit(void)
153 {
154
155 KASSERT(sizeof(struct _m_ext) <= MHLEN);
156 KASSERT(sizeof(struct mbuf) == MSIZE);
157
158 mclpool_allocator.pa_backingmap = mb_map;
159 pool_init(&mbpool, msize, 0, 0, 0, "mbpl", NULL);
160 pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", &mclpool_allocator);
161
162 pool_set_drain_hook(&mbpool, m_reclaim, NULL);
163 pool_set_drain_hook(&mclpool, m_reclaim, NULL);
164
165 pool_cache_init(&mbpool_cache, &mbpool, mb_ctor, NULL, NULL);
166 pool_cache_init(&mclpool_cache, &mclpool, NULL, NULL, NULL);
167
168 /*
169 * Set the hard limit on the mclpool to the number of
170 * mbuf clusters the kernel is to support. Log the limit-reached
171 * message no more often than once a minute.
172 */
173 pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);
174
175 /*
176 * Set a low water mark for both mbufs and clusters. This should
177 * help ensure that they can be allocated in a memory starvation
178 * situation. This is important for e.g. diskless systems which
179 * must allocate mbufs in order for the pagedaemon to clean pages.
180 */
181 pool_setlowat(&mbpool, mblowat);
182 pool_setlowat(&mclpool, mcllowat);
183
184 #ifdef MBUFTRACE
185 {
186 /*
187 * Attach the unknown mowners.
188 */
189 int i;
190 MOWNER_ATTACH(&revoked_mowner);
191 for (i = sizeof(unknown_mowners)/sizeof(unknown_mowners[0]);
192 i-- > 0; )
193 MOWNER_ATTACH(&unknown_mowners[i]);
194 }
195 #endif
196 }
197
198 /*
199 * sysctl helper routine for the kern.mbuf subtree. nmbclusters may
200 * or may not be writable, and mblowat and mcllowat need range
201 * checking and pool tweaking after being reset.
202 */
203 static int
204 sysctl_kern_mbuf(SYSCTLFN_ARGS)
205 {
206 int error, newval;
207 struct sysctlnode node;
208
209 node = *rnode;
210 node.sysctl_data = &newval;
211 switch (rnode->sysctl_num) {
212 case MBUF_NMBCLUSTERS:
213 if (mb_map != NULL) {
214 node.sysctl_flags &= ~CTLFLAG_READWRITE;
215 node.sysctl_flags |= CTLFLAG_READONLY;
216 }
217 /* FALLTHROUGH */
218 case MBUF_MBLOWAT:
219 case MBUF_MCLLOWAT:
220 newval = *(int*)rnode->sysctl_data;
221 break;
222 default:
223 return (EOPNOTSUPP);
224 }
225
226 error = sysctl_lookup(SYSCTLFN_CALL(&node));
227 if (error || newp == NULL)
228 return (error);
229 if (newval < 0)
230 return (EINVAL);
231
232 switch (node.sysctl_num) {
233 case MBUF_NMBCLUSTERS:
234 if (newval < nmbclusters)
235 return (EINVAL);
236 nmbclusters = newval;
237 pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);
238 break;
239 case MBUF_MBLOWAT:
240 mblowat = newval;
241 pool_setlowat(&mbpool, mblowat);
242 break;
243 case MBUF_MCLLOWAT:
244 mcllowat = newval;
245 pool_setlowat(&mclpool, mcllowat);
246 break;
247 }
248
249 return (0);
250 }
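
/*
 * Illustrative sketch, not part of the original file: these variables
 * are tuned from userland through the kern.mbuf subtree, e.g.
 *
 *	sysctl -w kern.mbuf.mblowat=64
 *
 * nmbclusters can only be raised, and the helper above marks it
 * read-only once mb_map is in use; the low-water marks take effect
 * immediately via pool_setlowat().
 */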
251
252 #ifdef MBUFTRACE
253 static int
254 sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
255 {
256 struct mowner *mo;
257 size_t len = 0;
258 int error = 0;
259
260 if (namelen != 0)
261 return (EINVAL);
262 if (newp != NULL)
263 return (EPERM);
264
265 LIST_FOREACH(mo, &mowners, mo_link) {
266 if (oldp != NULL) {
267 if (*oldlenp - len < sizeof(*mo)) {
268 error = ENOMEM;
269 break;
270 }
271 error = copyout(mo, (caddr_t) oldp + len,
272 sizeof(*mo));
273 if (error)
274 break;
275 }
276 len += sizeof(*mo);
277 }
278
279 if (error == 0)
280 *oldlenp = len;
281
282 return (error);
283 }
284 #endif /* MBUFTRACE */
285
286 SYSCTL_SETUP(sysctl_kern_mbuf_setup, "sysctl kern.mbuf subtree setup")
287 {
288
289 sysctl_createv(clog, 0, NULL, NULL,
290 CTLFLAG_PERMANENT,
291 CTLTYPE_NODE, "kern", NULL,
292 NULL, 0, NULL, 0,
293 CTL_KERN, CTL_EOL);
294 sysctl_createv(clog, 0, NULL, NULL,
295 CTLFLAG_PERMANENT,
296 CTLTYPE_NODE, "mbuf",
297 SYSCTL_DESCR("mbuf control variables"),
298 NULL, 0, NULL, 0,
299 CTL_KERN, KERN_MBUF, CTL_EOL);
300
301 sysctl_createv(clog, 0, NULL, NULL,
302 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
303 CTLTYPE_INT, "msize",
304 SYSCTL_DESCR("mbuf base size"),
305 NULL, msize, NULL, 0,
306 CTL_KERN, KERN_MBUF, MBUF_MSIZE, CTL_EOL);
307 sysctl_createv(clog, 0, NULL, NULL,
308 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
309 CTLTYPE_INT, "mclbytes",
310 SYSCTL_DESCR("mbuf cluster size"),
311 NULL, mclbytes, NULL, 0,
312 CTL_KERN, KERN_MBUF, MBUF_MCLBYTES, CTL_EOL);
313 sysctl_createv(clog, 0, NULL, NULL,
314 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
315 CTLTYPE_INT, "nmbclusters",
316 SYSCTL_DESCR("Limit on the number of mbuf clusters"),
317 sysctl_kern_mbuf, 0, &nmbclusters, 0,
318 CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS, CTL_EOL);
319 sysctl_createv(clog, 0, NULL, NULL,
320 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
321 CTLTYPE_INT, "mblowat",
322 SYSCTL_DESCR("mbuf low water mark"),
323 sysctl_kern_mbuf, 0, &mblowat, 0,
324 CTL_KERN, KERN_MBUF, MBUF_MBLOWAT, CTL_EOL);
325 sysctl_createv(clog, 0, NULL, NULL,
326 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
327 CTLTYPE_INT, "mcllowat",
328 SYSCTL_DESCR("mbuf cluster low water mark"),
329 sysctl_kern_mbuf, 0, &mcllowat, 0,
330 CTL_KERN, KERN_MBUF, MBUF_MCLLOWAT, CTL_EOL);
331 sysctl_createv(clog, 0, NULL, NULL,
332 CTLFLAG_PERMANENT,
333 CTLTYPE_STRUCT, "stats",
334 SYSCTL_DESCR("mbuf allocation statistics"),
335 NULL, 0, &mbstat, sizeof(mbstat),
336 CTL_KERN, KERN_MBUF, MBUF_STATS, CTL_EOL);
337 #ifdef MBUFTRACE
338 sysctl_createv(clog, 0, NULL, NULL,
339 CTLFLAG_PERMANENT,
340 CTLTYPE_STRUCT, "mowners",
341 SYSCTL_DESCR("Information about mbuf owners"),
342 sysctl_kern_mbuf_mowners, 0, NULL, 0,
343 CTL_KERN, KERN_MBUF, MBUF_MOWNERS, CTL_EOL);
344 #endif /* MBUFTRACE */
345 }
346
347 static void *
348 mclpool_alloc(struct pool *pp, int flags)
349 {
350 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
351
352 return ((void *)uvm_km_alloc_poolpage(mb_map, waitok));
353 }
354
355 static void
356 mclpool_release(struct pool *pp, void *v)
357 {
358
359 uvm_km_free_poolpage(mb_map, (vaddr_t)v);
360 }
361
362 /*ARGSUSED*/
363 static int
364 mb_ctor(void *arg, void *object, int flags)
365 {
366 struct mbuf *m = object;
367
368 #ifdef POOL_VTOPHYS
369 m->m_paddr = POOL_VTOPHYS(m);
370 #else
371 m->m_paddr = M_PADDR_INVALID;
372 #endif
373 return (0);
374 }
375
376 void
377 m_reclaim(void *arg, int flags)
378 {
379 struct domain *dp;
380 const struct protosw *pr;
381 struct ifnet *ifp;
382 int s = splvm();
383
384 DOMAIN_FOREACH(dp) {
385 for (pr = dp->dom_protosw;
386 pr < dp->dom_protoswNPROTOSW; pr++)
387 if (pr->pr_drain)
388 (*pr->pr_drain)();
389 }
390 IFNET_FOREACH(ifp) {
391 if (ifp->if_drain)
392 (*ifp->if_drain)(ifp);
393 }
394 splx(s);
395 mbstat.m_drain++;
396 }
397
398 /*
399 * Space allocation routines.
400 * These are also available as macros
401 * for critical paths.
402 */
403 struct mbuf *
404 m_get(int nowait, int type)
405 {
406 struct mbuf *m;
407
408 MGET(m, nowait, type);
409 return (m);
410 }
411
412 struct mbuf *
413 m_gethdr(int nowait, int type)
414 {
415 struct mbuf *m;
416
417 MGETHDR(m, nowait, type);
418 return (m);
419 }
420
421 struct mbuf *
422 m_getclr(int nowait, int type)
423 {
424 struct mbuf *m;
425
426 MGET(m, nowait, type);
427 if (m == 0)
428 return (NULL);
429 memset(mtod(m, caddr_t), 0, MLEN);
430 return (m);
431 }
432
433 void
434 m_clget(struct mbuf *m, int nowait)
435 {
436
437 MCLGET(m, nowait);
438 }
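
/*
 * Illustrative sketch, not part of the original file: the usual
 * allocation pattern for a packet that may need cluster storage.
 * MCLGET() can fail without returning an error, so callers check
 * M_EXT afterwards:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return ENOBUFS;
 *	if (len > MHLEN) {
 *		MCLGET(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			return ENOBUFS;
 *		}
 *	}
 */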
439
440 struct mbuf *
441 m_free(struct mbuf *m)
442 {
443 struct mbuf *n;
444
445 MFREE(m, n);
446 return (n);
447 }
448
449 void
450 m_freem(struct mbuf *m)
451 {
452 struct mbuf *n;
453
454 if (m == NULL)
455 return;
456 do {
457 MFREE(m, n);
458 m = n;
459 } while (m);
460 }
461
462 #ifdef MBUFTRACE
463 /*
464 * Walk a chain of mbufs, claiming ownership of each mbuf in the chain.
465 */
466 void
467 m_claimm(struct mbuf *m, struct mowner *mo)
468 {
469
470 for (; m != NULL; m = m->m_next)
471 MCLAIM(m, mo);
472 }
473 #endif
474
475 /*
476 * Mbuffer utility routines.
477 */
478
479 /*
480 * Lesser-used path for M_PREPEND:
481 * allocate new mbuf to prepend to chain,
482 * copy junk along.
483 */
484 struct mbuf *
485 m_prepend(struct mbuf *m, int len, int how)
486 {
487 struct mbuf *mn;
488
489 MGET(mn, how, m->m_type);
490 if (mn == (struct mbuf *)NULL) {
491 m_freem(m);
492 return ((struct mbuf *)NULL);
493 }
494 if (m->m_flags & M_PKTHDR) {
495 M_MOVE_PKTHDR(mn, m);
496 } else {
497 MCLAIM(mn, m->m_owner);
498 }
499 mn->m_next = m;
500 m = mn;
501 if (len < MHLEN)
502 MH_ALIGN(m, len);
503 m->m_len = len;
504 return (m);
505 }
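
/*
 * Illustrative sketch, not part of the original file: callers normally
 * use the M_PREPEND() macro, which falls back to m_prepend() only when
 * the first mbuf has no leading space. Note the chain is freed on
 * failure ("hdrlen" is a stand-in for the caller's header size):
 *
 *	M_PREPEND(m, hdrlen, M_DONTWAIT);
 *	if (m == NULL)
 *		return ENOBUFS;
 */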
506
507 /*
508 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
509 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
510 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
511 */
512 int MCFail;
513
514 struct mbuf *
515 m_copym(struct mbuf *m, int off0, int len, int wait)
516 {
517
518 return m_copym0(m, off0, len, wait, 0); /* shallow copy on M_EXT */
519 }
520
521 struct mbuf *
522 m_dup(struct mbuf *m, int off0, int len, int wait)
523 {
524
525 return m_copym0(m, off0, len, wait, 1); /* deep copy */
526 }
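
/*
 * Illustrative sketch, not part of the original file: the difference
 * between the two entry points above. m_copym() shares any cluster
 * storage with the original chain, so the copy must be treated as
 * read-only; m_dup() copies the data into fresh clusters:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);	- shallow
 *	n = m_dup(m, 0, M_COPYALL, M_DONTWAIT);		- deep
 */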
527
528 static struct mbuf *
529 m_copym0(struct mbuf *m, int off0, int len, int wait, int deep)
530 {
531 struct mbuf *n, **np;
532 int off = off0;
533 struct mbuf *top;
534 int copyhdr = 0;
535
536 if (off < 0 || len < 0)
537 panic("m_copym: off %d, len %d", off, len);
538 if (off == 0 && m->m_flags & M_PKTHDR)
539 copyhdr = 1;
540 while (off > 0) {
541 if (m == 0)
542 panic("m_copym: m == 0, off %d", off);
543 if (off < m->m_len)
544 break;
545 off -= m->m_len;
546 m = m->m_next;
547 }
548 np = &top;
549 top = 0;
550 while (len > 0) {
551 if (m == 0) {
552 if (len != M_COPYALL)
553 panic("m_copym: m == 0, len %d [!COPYALL]",
554 len);
555 break;
556 }
557 MGET(n, wait, m->m_type);
558 *np = n;
559 if (n == 0)
560 goto nospace;
561 MCLAIM(n, m->m_owner);
562 if (copyhdr) {
563 M_COPY_PKTHDR(n, m);
564 if (len == M_COPYALL)
565 n->m_pkthdr.len -= off0;
566 else
567 n->m_pkthdr.len = len;
568 copyhdr = 0;
569 }
570 n->m_len = min(len, m->m_len - off);
571 if (m->m_flags & M_EXT) {
572 if (!deep) {
573 n->m_data = m->m_data + off;
574 n->m_ext = m->m_ext;
575 MCLADDREFERENCE(m, n);
576 } else {
577 /*
578 * we are unsure about the way m was allocated, so
579 * copy the data into multiple MCLBYTES cluster mbufs.
580 */
581 MCLGET(n, wait);
582 n->m_len = 0;
583 n->m_len = M_TRAILINGSPACE(n);
584 n->m_len = min(n->m_len, len);
585 n->m_len = min(n->m_len, m->m_len - off);
586 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
587 (unsigned)n->m_len);
588 }
589 } else
590 memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
591 (unsigned)n->m_len);
592 if (len != M_COPYALL)
593 len -= n->m_len;
594 off += n->m_len;
595 #ifdef DIAGNOSTIC
596 if (off > m->m_len)
597 panic("m_copym0 overrun");
598 #endif
599 if (off == m->m_len) {
600 m = m->m_next;
601 off = 0;
602 }
603 np = &n->m_next;
604 }
605 if (top == 0)
606 MCFail++;
607 return (top);
608 nospace:
609 m_freem(top);
610 MCFail++;
611 return (NULL);
612 }
613
614 /*
615 * Copy an entire packet, including header (which must be present).
616 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
617 */
618 struct mbuf *
619 m_copypacket(struct mbuf *m, int how)
620 {
621 struct mbuf *top, *n, *o;
622
623 MGET(n, how, m->m_type);
624 top = n;
625 if (!n)
626 goto nospace;
627
628 MCLAIM(n, m->m_owner);
629 M_COPY_PKTHDR(n, m);
630 n->m_len = m->m_len;
631 if (m->m_flags & M_EXT) {
632 n->m_data = m->m_data;
633 n->m_ext = m->m_ext;
634 MCLADDREFERENCE(m, n);
635 } else {
636 memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
637 }
638
639 m = m->m_next;
640 while (m) {
641 MGET(o, how, m->m_type);
642 if (!o)
643 goto nospace;
644
645 MCLAIM(o, m->m_owner);
646 n->m_next = o;
647 n = n->m_next;
648
649 n->m_len = m->m_len;
650 if (m->m_flags & M_EXT) {
651 n->m_data = m->m_data;
652 n->m_ext = m->m_ext;
653 MCLADDREFERENCE(m, n);
654 } else {
655 memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
656 }
657
658 m = m->m_next;
659 }
660 return top;
661 nospace:
662 m_freem(top);
663 MCFail++;
664 return NULL;
665 }
666
667 /*
668 * Copy data from an mbuf chain starting "off" bytes from the beginning,
669 * continuing for "len" bytes, into the indicated buffer.
670 */
671 void
672 m_copydata(struct mbuf *m, int off, int len, void *vp)
673 {
674 unsigned count;
675 caddr_t cp = vp;
676
677 if (off < 0 || len < 0)
678 panic("m_copydata: off %d, len %d", off, len);
679 while (off > 0) {
680 if (m == NULL)
681 panic("m_copydata: m == NULL, off %d", off);
682 if (off < m->m_len)
683 break;
684 off -= m->m_len;
685 m = m->m_next;
686 }
687 while (len > 0) {
688 if (m == NULL)
689 panic("m_copydata: m == NULL, len %d", len);
690 count = min(m->m_len - off, len);
691 memcpy(cp, mtod(m, caddr_t) + off, count);
692 len -= count;
693 cp += count;
694 off = 0;
695 m = m->m_next;
696 }
697 }
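
/*
 * Illustrative sketch, not part of the original file: a typical use of
 * m_copydata() is extracting a fixed-size header into local storage
 * without caring how the chain is fragmented:
 *
 *	struct ip ip;
 *
 *	m_copydata(m, 0, sizeof(ip), &ip);
 *
 * The range must lie entirely within the chain, or the routine panics.
 */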
698
699 /*
700 * Concatenate mbuf chain n to m.
701 * n might be copied into m (when n->m_len is small), so the data portion of
702 * n can end up in an mbuf of a different mbuf type.
703 * The m_pkthdr of m, if present, is not updated.
704 */
705 void
706 m_cat(struct mbuf *m, struct mbuf *n)
707 {
708
709 while (m->m_next)
710 m = m->m_next;
711 while (n) {
712 if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
713 /* just join the two chains */
714 m->m_next = n;
715 return;
716 }
717 /* splat the data from one into the other */
718 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
719 (u_int)n->m_len);
720 m->m_len += n->m_len;
721 n = m_free(n);
722 }
723 }
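
/*
 * Illustrative sketch, not part of the original file: m_cat() is
 * typically used for reassembly, appending one fragment's data to the
 * chain collected so far ("frag" and "hdrlen" are stand-ins):
 *
 *	m_adj(frag, hdrlen);	- strip the fragment header
 *	m_cat(head, frag);	- may copy or just link
 *
 * The caller must fix up head->m_pkthdr.len itself.
 */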
724
725 void
726 m_adj(struct mbuf *mp, int req_len)
727 {
728 int len = req_len;
729 struct mbuf *m;
730 int count;
731
732 if ((m = mp) == NULL)
733 return;
734 if (len >= 0) {
735 /*
736 * Trim from head.
737 */
738 while (m != NULL && len > 0) {
739 if (m->m_len <= len) {
740 len -= m->m_len;
741 m->m_len = 0;
742 m = m->m_next;
743 } else {
744 m->m_len -= len;
745 m->m_data += len;
746 len = 0;
747 }
748 }
749 m = mp;
750 if (mp->m_flags & M_PKTHDR)
751 m->m_pkthdr.len -= (req_len - len);
752 } else {
753 /*
754 * Trim from tail. Scan the mbuf chain,
755 * calculating its length and finding the last mbuf.
756 * If the adjustment only affects this mbuf, then just
757 * adjust and return. Otherwise, rescan and truncate
758 * after the remaining size.
759 */
760 len = -len;
761 count = 0;
762 for (;;) {
763 count += m->m_len;
764 if (m->m_next == (struct mbuf *)0)
765 break;
766 m = m->m_next;
767 }
768 if (m->m_len >= len) {
769 m->m_len -= len;
770 if (mp->m_flags & M_PKTHDR)
771 mp->m_pkthdr.len -= len;
772 return;
773 }
774 count -= len;
775 if (count < 0)
776 count = 0;
777 /*
778 * Correct length for chain is "count".
779 * Find the mbuf with last data, adjust its length,
780 * and toss data from remaining mbufs on chain.
781 */
782 m = mp;
783 if (m->m_flags & M_PKTHDR)
784 m->m_pkthdr.len = count;
785 for (; m; m = m->m_next) {
786 if (m->m_len >= count) {
787 m->m_len = count;
788 break;
789 }
790 count -= m->m_len;
791 }
792 if (m)
793 while (m->m_next)
794 (m = m->m_next)->m_len = 0;
795 }
796 }
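
/*
 * Illustrative sketch, not part of the original file: a positive
 * req_len trims bytes from the head of the chain, a negative one from
 * the tail ("padlen" is a stand-in for a computed amount):
 *
 *	m_adj(m, sizeof(struct ether_header));	- drop link header
 *	m_adj(m, -padlen);			- drop trailing pad
 */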
797
798 /*
799 * Rearrange an mbuf chain so that len bytes are contiguous
800 * and in the data area of an mbuf (so that mtod and dtom
801 * will work for a structure of size len). Returns the resulting
802 * mbuf chain on success, frees it and returns null on failure.
803 * If there is room, it will add up to max_protohdr-len extra bytes to the
804 * contiguous region in an attempt to avoid being called next time.
805 */
806 int MPFail;
807
808 struct mbuf *
809 m_pullup(struct mbuf *n, int len)
810 {
811 struct mbuf *m;
812 int count;
813 int space;
814
815 /*
816 * If first mbuf has no cluster, and has room for len bytes
817 * without shifting current data, pullup into it,
818 * otherwise allocate a new mbuf to prepend to the chain.
819 */
820 if ((n->m_flags & M_EXT) == 0 &&
821 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
822 if (n->m_len >= len)
823 return (n);
824 m = n;
825 n = n->m_next;
826 len -= m->m_len;
827 } else {
828 if (len > MHLEN)
829 goto bad;
830 MGET(m, M_DONTWAIT, n->m_type);
831 if (m == 0)
832 goto bad;
833 MCLAIM(m, n->m_owner);
834 m->m_len = 0;
835 if (n->m_flags & M_PKTHDR) {
836 M_MOVE_PKTHDR(m, n);
837 }
838 }
839 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
840 do {
841 count = min(min(max(len, max_protohdr), space), n->m_len);
842 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
843 (unsigned)count);
844 len -= count;
845 m->m_len += count;
846 n->m_len -= count;
847 space -= count;
848 if (n->m_len)
849 n->m_data += count;
850 else
851 n = m_free(n);
852 } while (len > 0 && n);
853 if (len > 0) {
854 (void) m_free(m);
855 goto bad;
856 }
857 m->m_next = n;
858 return (m);
859 bad:
860 m_freem(n);
861 MPFail++;
862 return (NULL);
863 }
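
/*
 * Illustrative sketch, not part of the original file: the canonical
 * input-path idiom. m_pullup() frees the chain on failure, so the old
 * pointer must not be used afterwards:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		- chain already freed
 *	ip = mtod(m, struct ip *);
 */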
864
865 /*
866 * Like m_pullup(), except a new mbuf is always allocated, and we allow
867 * the amount of empty space before the data in the new mbuf to be specified
868 * (in the event that the caller expects to prepend later).
869 */
870 int MSFail;
871
872 struct mbuf *
873 m_copyup(struct mbuf *n, int len, int dstoff)
874 {
875 struct mbuf *m;
876 int count, space;
877
878 if (len > (MHLEN - dstoff))
879 goto bad;
880 MGET(m, M_DONTWAIT, n->m_type);
881 if (m == NULL)
882 goto bad;
883 MCLAIM(m, n->m_owner);
884 m->m_len = 0;
885 if (n->m_flags & M_PKTHDR) {
886 M_MOVE_PKTHDR(m, n);
887 }
888 m->m_data += dstoff;
889 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
890 do {
891 count = min(min(max(len, max_protohdr), space), n->m_len);
892 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
893 (unsigned)count);
894 len -= count;
895 m->m_len += count;
896 n->m_len -= count;
897 space -= count;
898 if (n->m_len)
899 n->m_data += count;
900 else
901 n = m_free(n);
902 } while (len > 0 && n);
903 if (len > 0) {
904 (void) m_free(m);
905 goto bad;
906 }
907 m->m_next = n;
908 return (m);
909 bad:
910 m_freem(n);
911 MSFail++;
912 return (NULL);
913 }
914
915 /*
916 * Partition an mbuf chain in two pieces, returning the tail --
917 * all but the first len0 bytes. In case of failure, it returns NULL and
918 * attempts to restore the chain to its original state.
919 */
920 struct mbuf *
921 m_split(struct mbuf *m0, int len0, int wait)
922 {
923
924 return m_split0(m0, len0, wait, 1);
925 }
926
927 static struct mbuf *
928 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
929 {
930 struct mbuf *m, *n;
931 unsigned len = len0, remain, len_save;
932
933 for (m = m0; m && len > m->m_len; m = m->m_next)
934 len -= m->m_len;
935 if (m == 0)
936 return (NULL);
937 remain = m->m_len - len;
938 if (copyhdr && (m0->m_flags & M_PKTHDR)) {
939 MGETHDR(n, wait, m0->m_type);
940 if (n == 0)
941 return (NULL);
942 MCLAIM(n, m0->m_owner);
943 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
944 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
945 len_save = m0->m_pkthdr.len;
946 m0->m_pkthdr.len = len0;
947 if (m->m_flags & M_EXT)
948 goto extpacket;
949 if (remain > MHLEN) {
950 /* m can't be the lead packet */
951 MH_ALIGN(n, 0);
952 n->m_next = m_split(m, len, wait);
953 if (n->m_next == 0) {
954 (void) m_free(n);
955 m0->m_pkthdr.len = len_save;
956 return (NULL);
957 } else
958 return (n);
959 } else
960 MH_ALIGN(n, remain);
961 } else if (remain == 0) {
962 n = m->m_next;
963 m->m_next = 0;
964 return (n);
965 } else {
966 MGET(n, wait, m->m_type);
967 if (n == 0)
968 return (NULL);
969 MCLAIM(n, m->m_owner);
970 M_ALIGN(n, remain);
971 }
972 extpacket:
973 if (m->m_flags & M_EXT) {
974 n->m_ext = m->m_ext;
975 MCLADDREFERENCE(m, n);
976 n->m_data = m->m_data + len;
977 } else {
978 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
979 }
980 n->m_len = remain;
981 m->m_len = len;
982 n->m_next = m->m_next;
983 m->m_next = 0;
984 return (n);
985 }
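
/*
 * Illustrative sketch, not part of the original file: peeling one
 * record off the front of a chain ("reclen" is a stand-in):
 *
 *	tail = m_split(m, reclen, M_DONTWAIT);
 *	if (tail == NULL)
 *		- allocation failed; m is left intact
 *
 * On success m holds the first reclen bytes, tail the remainder, and
 * both packet header lengths are adjusted.
 */
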
986 /*
987 * Routine to copy from device local memory into mbufs.
988 */
989 struct mbuf *
990 m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
991 void (*copy)(const void *from, void *to, size_t len))
992 {
993 struct mbuf *m;
994 struct mbuf *top = 0, **mp = &top;
995 int off = off0, len;
996 char *cp;
997 char *epkt;
998
999 cp = buf;
1000 epkt = cp + totlen;
1001 if (off) {
1002 /*
1003 * If 'off' is non-zero, packet is trailer-encapsulated,
1004 * so we have to skip the type and length fields.
1005 */
1006 cp += off + 2 * sizeof(uint16_t);
1007 totlen -= 2 * sizeof(uint16_t);
1008 }
1009 MGETHDR(m, M_DONTWAIT, MT_DATA);
1010 if (m == 0)
1011 return (NULL);
1012 m->m_pkthdr.rcvif = ifp;
1013 m->m_pkthdr.len = totlen;
1014 m->m_len = MHLEN;
1015
1016 while (totlen > 0) {
1017 if (top) {
1018 MGET(m, M_DONTWAIT, MT_DATA);
1019 if (m == 0) {
1020 m_freem(top);
1021 return (NULL);
1022 }
1023 m->m_len = MLEN;
1024 }
1025 len = min(totlen, epkt - cp);
1026 if (len >= MINCLSIZE) {
1027 MCLGET(m, M_DONTWAIT);
1028 if ((m->m_flags & M_EXT) == 0) {
1029 m_free(m);
1030 m_freem(top);
1031 return (NULL);
1032 }
1033 m->m_len = len = min(len, MCLBYTES);
1034 } else {
1035 /*
1036 * Place initial small packet/header at end of mbuf.
1037 */
1038 if (len < m->m_len) {
1039 if (top == 0 && len + max_linkhdr <= m->m_len)
1040 m->m_data += max_linkhdr;
1041 m->m_len = len;
1042 } else
1043 len = m->m_len;
1044 }
1045 if (copy)
1046 copy(cp, mtod(m, caddr_t), (size_t)len);
1047 else
1048 memcpy(mtod(m, caddr_t), cp, (size_t)len);
1049 cp += len;
1050 *mp = m;
1051 mp = &m->m_next;
1052 totlen -= len;
1053 if (cp == epkt)
1054 cp = buf;
1055 }
1056 return (top);
1057 }
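
/*
 * Illustrative sketch, not part of the original file: a driver receive
 * path copying a frame out of board memory. A NULL "copy" argument
 * selects plain memcpy() ("rxbuf" and "framelen" are stand-ins for
 * driver state):
 *
 *	m = m_devget(rxbuf, framelen, 0, ifp, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 */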
1058
1059 /*
1060 * Copy data from a buffer back into the indicated mbuf chain,
1061 * starting "off" bytes from the beginning, extending the mbuf
1062 * chain if necessary.
1063 */
1064 void
1065 m_copyback(struct mbuf *m0, int off, int len, const void *cp)
1066 {
1067 #if defined(DEBUG)
1068 struct mbuf *origm = m0;
1069 int error;
1070 #endif /* defined(DEBUG) */
1071
1072 if (m0 == NULL)
1073 return;
1074
1075 #if defined(DEBUG)
1076 error =
1077 #endif /* defined(DEBUG) */
1078 m_copyback0(&m0, off, len, cp,
1079 M_COPYBACK0_COPYBACK|M_COPYBACK0_EXTEND, M_DONTWAIT);
1080
1081 #if defined(DEBUG)
1082 if (error != 0 || (m0 != NULL && origm != m0))
1083 panic("m_copyback");
1084 #endif /* defined(DEBUG) */
1085 }
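
/*
 * Illustrative sketch, not part of the original file: overwriting a
 * field at a known offset, e.g. storing a freshly computed checksum
 * ("ckoff" and "csum" are stand-ins):
 *
 *	m_copyback(m, ckoff, sizeof(csum), &csum);
 *
 * m_copyback() writes through shared clusters; when the chain may be
 * read-only, use m_copyback_cow() or m_makewritable() below instead.
 */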
1086
1087 struct mbuf *
1088 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
1089 {
1090 int error;
1091
1092 /* don't support chain expansion */
1093 KDASSERT(off + len <= m_length(m0));
1094
1095 error = m_copyback0(&m0, off, len, cp,
1096 M_COPYBACK0_COPYBACK|M_COPYBACK0_COW, how);
1097 if (error) {
1098 /*
1099 * no way to recover from partial success.
1100 * just free the chain.
1101 */
1102 m_freem(m0);
1103 return NULL;
1104 }
1105 return m0;
1106 }
1107
1108 /*
1109 * m_makewritable: ensure the specified range is writable.
1110 */
1111 int
1112 m_makewritable(struct mbuf **mp, int off, int len, int how)
1113 {
1114 int error;
1115 #if defined(DEBUG)
1116 struct mbuf *n;
1117 int origlen, reslen;
1118
1119 origlen = m_length(*mp);
1120 #endif /* defined(DEBUG) */
1121
1122 #if 0 /* M_COPYALL is large enough */
1123 if (len == M_COPYALL)
1124 len = m_length(*mp) - off; /* XXX */
1125 #endif
1126
1127 error = m_copyback0(mp, off, len, NULL,
1128 M_COPYBACK0_PRESERVE|M_COPYBACK0_COW, how);
1129
1130 #if defined(DEBUG)
1131 reslen = 0;
1132 for (n = *mp; n; n = n->m_next)
1133 reslen += n->m_len;
1134 if (origlen != reslen)
1135 panic("m_makewritable: length changed");
1136 if (((*mp)->m_flags & M_PKTHDR) != 0 && reslen != (*mp)->m_pkthdr.len)
1137 panic("m_makewritable: inconsist");
1138 #endif /* defined(DEBUG) */
1139
1140 return error;
1141 }
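
/*
 * Illustrative sketch, not part of the original file: the usual
 * pattern before modifying packet data in place, since the chain may
 * reference shared (read-only) clusters:
 *
 *	if ((error = m_makewritable(&m, off, len, M_DONTWAIT)) != 0) {
 *		m_freem(m);
 *		return error;
 *	}
 *	- now the bytes in [off, off + len) may be written
 */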
1142
1143 int
1144 m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
1145 int how)
1146 {
1147 int mlen;
1148 struct mbuf *m, *n;
1149 struct mbuf **mp;
1150 int totlen = 0;
1151 const char *cp = vp;
1152
1153 KASSERT(mp0 != NULL);
1154 KASSERT(*mp0 != NULL);
1155 KASSERT((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL);
1156 KASSERT((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL);
1157
1158 /*
1159 * we don't bother to update "totlen" in the case of M_COPYBACK0_COW,
1160 * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive.
1161 */
1162
1163 KASSERT((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0);
1164
1165 mp = mp0;
1166 m = *mp;
1167 while (off > (mlen = m->m_len)) {
1168 off -= mlen;
1169 totlen += mlen;
1170 if (m->m_next == NULL) {
1171 int tspace;
1172 extend:
1173 if ((flags & M_COPYBACK0_EXTEND) == 0)
1174 goto out;
1175
1176 /*
1177 * try to make some space at the end of "m".
1178 */
1179
1180 mlen = m->m_len;
1181 if (off + len >= MINCLSIZE &&
1182 (m->m_flags & M_EXT) == 0 && m->m_len == 0) {
1183 MCLGET(m, how);
1184 }
1185 tspace = M_TRAILINGSPACE(m);
1186 if (tspace > 0) {
1187 tspace = min(tspace, off + len);
1188 KASSERT(tspace > 0);
1189 memset(mtod(m, char *) + m->m_len, 0,
1190 min(off, tspace));
1191 m->m_len += tspace;
1192 off += mlen;
1193 totlen -= mlen;
1194 continue;
1195 }
1196
1197 /*
1198 * need to allocate an mbuf.
1199 */
1200
1201 if (off + len >= MINCLSIZE) {
1202 n = m_getcl(how, m->m_type, 0);
1203 } else {
1204 n = m_get(how, m->m_type);
1205 }
1206 if (n == NULL) {
1207 goto out;
1208 }
1209 n->m_len = 0;
1210 n->m_len = min(M_TRAILINGSPACE(n), off + len);
1211 memset(mtod(n, char *), 0, min(n->m_len, off));
1212 m->m_next = n;
1213 }
1214 mp = &m->m_next;
1215 m = m->m_next;
1216 }
1217 while (len > 0) {
1218 mlen = m->m_len - off;
1219 if (mlen != 0 && M_READONLY(m)) {
1220 char *datap;
1221 int eatlen;
1222
1223 /*
1224 * this mbuf is read-only.
1225 * allocate a new writable mbuf and try again.
1226 */
1227
1228 #if defined(DIAGNOSTIC)
1229 if ((flags & M_COPYBACK0_COW) == 0)
1230 panic("m_copyback0: read-only");
1231 #endif /* defined(DIAGNOSTIC) */
1232
1233 /*
1234 * if we're going to write into the middle of
1235 * an mbuf, split it first.
1236 */
1237 if (off > 0 && len < mlen) {
1238 n = m_split0(m, off, how, 0);
1239 if (n == NULL)
1240 goto enobufs;
1241 m->m_next = n;
1242 mp = &m->m_next;
1243 m = n;
1244 off = 0;
1245 continue;
1246 }
1247
1248 /*
1249 * XXX TODO coalesce into the trailingspace of
1250 * the previous mbuf when possible.
1251 */
1252
1253 /*
1254 * allocate a new mbuf. copy packet header if needed.
1255 */
1256 MGET(n, how, m->m_type);
1257 if (n == NULL)
1258 goto enobufs;
1259 MCLAIM(n, m->m_owner);
1260 if (off == 0 && (m->m_flags & M_PKTHDR) != 0) {
1261 M_MOVE_PKTHDR(n, m);
1262 n->m_len = MHLEN;
1263 } else {
1264 if (len >= MINCLSIZE)
1265 MCLGET(n, M_DONTWAIT);
1266 n->m_len =
1267 (n->m_flags & M_EXT) ? MCLBYTES : MLEN;
1268 }
1269 if (n->m_len > len)
1270 n->m_len = len;
1271
1272 /*
1273 * free the region which has been overwritten,
1274 * copying data from the old mbufs if requested.
1275 */
1276 if (flags & M_COPYBACK0_PRESERVE)
1277 datap = mtod(n, char *);
1278 else
1279 datap = NULL;
1280 eatlen = n->m_len;
1281 KDASSERT(off == 0 || eatlen >= mlen);
1282 if (off > 0) {
1283 KDASSERT(len >= mlen);
1284 m->m_len = off;
1285 m->m_next = n;
1286 if (datap) {
1287 m_copydata(m, off, mlen, datap);
1288 datap += mlen;
1289 }
1290 eatlen -= mlen;
1291 mp = &m->m_next;
1292 m = m->m_next;
1293 }
1294 while (m != NULL && M_READONLY(m) &&
1295 n->m_type == m->m_type && eatlen > 0) {
1296 mlen = min(eatlen, m->m_len);
1297 if (datap) {
1298 m_copydata(m, 0, mlen, datap);
1299 datap += mlen;
1300 }
1301 m->m_data += mlen;
1302 m->m_len -= mlen;
1303 eatlen -= mlen;
1304 if (m->m_len == 0)
1305 *mp = m = m_free(m);
1306 }
1307 if (eatlen > 0)
1308 n->m_len -= eatlen;
1309 n->m_next = m;
1310 *mp = m = n;
1311 continue;
1312 }
1313 mlen = min(mlen, len);
1314 if (flags & M_COPYBACK0_COPYBACK) {
1315 memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
1316 cp += mlen;
1317 }
1318 len -= mlen;
1319 mlen += off;
1320 off = 0;
1321 totlen += mlen;
1322 if (len == 0)
1323 break;
1324 if (m->m_next == NULL) {
1325 goto extend;
1326 }
1327 mp = &m->m_next;
1328 m = m->m_next;
1329 }
1330 out: if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
1331 KASSERT((flags & M_COPYBACK0_EXTEND) != 0);
1332 m->m_pkthdr.len = totlen;
1333 }
1334
1335 return 0;
1336
1337 enobufs:
1338 return ENOBUFS;
1339 }
1340
1341 void
1342 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1343 {
1344
1345 KASSERT((to->m_flags & M_EXT) == 0);
1346 KASSERT((to->m_flags & M_PKTHDR) == 0 || m_tag_first(to) == NULL);
1347 KASSERT((from->m_flags & M_PKTHDR) != 0);
1348
1349 to->m_pkthdr = from->m_pkthdr;
1350 to->m_flags = from->m_flags & M_COPYFLAGS;
1351 to->m_data = to->m_pktdat;
1352
1353 from->m_flags &= ~M_PKTHDR;
1354 }
1355
1356 /*
1357 * Apply function f to the data in an mbuf chain starting "off" bytes from the
1358 * beginning, continuing for "len" bytes.
1359 */
1360 int
1361 m_apply(struct mbuf *m, int off, int len,
1362 int (*f)(void *, caddr_t, unsigned int), void *arg)
1363 {
1364 unsigned int count;
1365 int rval;
1366
1367 KASSERT(len >= 0);
1368 KASSERT(off >= 0);
1369
1370 while (off > 0) {
1371 KASSERT(m != NULL);
1372 if (off < m->m_len)
1373 break;
1374 off -= m->m_len;
1375 m = m->m_next;
1376 }
1377 while (len > 0) {
1378 KASSERT(m != NULL);
1379 count = min(m->m_len - off, len);
1380
1381 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1382 if (rval)
1383 return (rval);
1384
1385 len -= count;
1386 off = 0;
1387 m = m->m_next;
1388 }
1389
1390 return (0);
1391 }
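
/*
 * Illustrative sketch, not part of the original file: walking a region
 * with a hypothetical callback, e.g. summing bytes without
 * linearizing the chain:
 *
 *	static int
 *	sumup(void *arg, caddr_t data, unsigned int len)
 *	{
 *		uint32_t *sum = arg;
 *
 *		while (len--)
 *			*sum += (unsigned char)*data++;
 *		return 0;	- nonzero aborts the walk
 *	}
 *
 *	error = m_apply(m, off, len, sumup, &sum);
 */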
1392
1393 /*
1394 * Return a pointer to mbuf/offset of location in mbuf chain.
1395 */
1396 struct mbuf *
1397 m_getptr(struct mbuf *m, int loc, int *off)
1398 {
1399
1400 while (loc >= 0) {
1401 /* Normal end of search */
1402 if (m->m_len > loc) {
1403 *off = loc;
1404 return (m);
1405 } else {
1406 loc -= m->m_len;
1407
1408 if (m->m_next == NULL) {
1409 if (loc == 0) {
1410 /* Point at the end of valid data */
1411 *off = m->m_len;
1412 return (m);
1413 } else
1414 return (NULL);
1415 } else
1416 m = m->m_next;
1417 }
1418 }
1419
1420 return (NULL);
1421 }
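
/*
 * Illustrative sketch, not part of the original file: translating a
 * chain-relative offset into an (mbuf, offset) pair before patching a
 * byte in place ("loc" and "val" are stand-ins):
 *
 *	int off;
 *	struct mbuf *n = m_getptr(m, loc, &off);
 *
 *	if (n != NULL)
 *		mtod(n, uint8_t *)[off] = val;
 */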
1422
1423 #if defined(DDB)
1424 void
1425 m_print(const struct mbuf *m, const char *modif, void (*pr)(const char *, ...))
1426 {
1427 char ch;
1428 boolean_t opt_c = FALSE;
1429 char buf[512];
1430
1431 while ((ch = *(modif++)) != '\0') {
1432 switch (ch) {
1433 case 'c':
1434 opt_c = TRUE;
1435 break;
1436 }
1437 }
1438
1439 nextchain:
1440 (*pr)("MBUF %p\n", m);
1441 bitmask_snprintf(m->m_flags, M_FLAGS_BITS, buf, sizeof(buf));
1442 (*pr)(" data=%p, len=%d, type=%d, flags=0x%s\n",
1443 m->m_data, m->m_len, m->m_type, buf);
1444 (*pr)(" owner=%p, next=%p, nextpkt=%p\n", m->m_owner, m->m_next,
1445 m->m_nextpkt);
1446 (*pr)(" leadingspace=%u, trailingspace=%u, readonly=%u\n",
1447 (int)M_LEADINGSPACE(m), (int)M_TRAILINGSPACE(m),
1448 (int)M_READONLY(m));
1449 if ((m->m_flags & M_PKTHDR) != 0) {
1450 bitmask_snprintf(m->m_pkthdr.csum_flags, M_CSUM_BITS, buf,
1451 sizeof(buf));
1452 (*pr)(" pktlen=%d, rcvif=%p, csum_flags=0x%s, csum_data=0x%"
1453 PRIx32 ", segsz=%u\n",
1454 m->m_pkthdr.len, m->m_pkthdr.rcvif,
1455 buf, m->m_pkthdr.csum_data, m->m_pkthdr.segsz);
1456 }
1457 if ((m->m_flags & M_EXT)) {
1458 (*pr)(" shared=%u, ext_buf=%p, ext_size=%zd, "
1459 "ext_free=%p, ext_arg=%p\n",
1460 (int)MCLISREFERENCED(m),
1461 m->m_ext.ext_buf, m->m_ext.ext_size,
1462 m->m_ext.ext_free, m->m_ext.ext_arg);
1463 }
1464 if ((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0) {
1465 vaddr_t sva = (vaddr_t)m->m_ext.ext_buf;
1466 vaddr_t eva = sva + m->m_ext.ext_size;
1467 int n = (round_page(eva) - trunc_page(sva)) >> PAGE_SHIFT;
1468 int i;
1469
1470 (*pr)(" pages:");
1471 for (i = 0; i < n; i ++) {
1472 (*pr)(" %p", m->m_ext.ext_pgs[i]);
1473 }
1474 (*pr)("\n");
1475 }
1476
1477 if (opt_c) {
1478 m = m->m_next;
1479 if (m != NULL) {
1480 goto nextchain;
1481 }
1482 }
1483 }
1484 #endif /* defined(DDB) */