FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.24.2.3 1999/09/05 08:15:32 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
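/*
 * Register mbinit() to run once at boot, during the SI_SUB_MBUF stage
 * of kernel startup.
 */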
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16
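/*
 * NMB_INIT and NCL_INIT only seed the free lists at boot; the pools
 * grow on demand afterwards through m_mballoc() and m_clalloc().  If
 * even this minimal allocation fails, mbinit() panics.
 */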

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/* Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map) (XXX which
	 * is dumb). (however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full)
		return (0);

	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	ncl = ncl * PAGE_SIZE / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
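	/*
	 * MGET normally falls back to m_retry() when the free list is
	 * empty.  Temporarily redefining m_retry to a null constant
	 * keeps the MGET expansion below from recursing back into
	 * this function.
	 */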
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
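	/*
	 * Same trick as in m_retry() above: stub out m_retryhdr so the
	 * nested MGETHDR cannot recurse.
	 */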
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
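/*
 * Illustrative usage sketch (not part of this file): allocate a data
 * mbuf without sleeping, then release it.
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 */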
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
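/*
 * (The M_PREPEND() macro takes this slower path only when the first
 * mbuf lacks len bytes of leading space; otherwise it just backs up
 * m_data in place.)
 */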
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
static int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
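/*
 * Illustrative usage sketch (not part of this file): extract a header
 * into local storage no matter how the chain is fragmented.
 *
 *	struct ip ip;
 *
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 */
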
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

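/*
 * Trim req_len bytes from the mbuf chain: from the head when req_len
 * is positive, from the tail when it is negative.  The packet header
 * length, if present, is updated to match.
 */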
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
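/*
 * Illustrative usage sketch (the classic idiom from the protocol input
 * paths, not part of this file): make a header contiguous before
 * casting it.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0)
 *		return;
 *	ip = mtod(m, struct ip *);
 */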
static int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
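/*
 * Illustrative usage sketch (not part of this file): keep the first
 * reclen bytes in m and take the remainder as a separate chain.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, reclen, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */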
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
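/*
 * Illustrative usage sketch (not part of this file): overwrite a
 * two-byte field at offset off, growing the chain if it is short.
 *
 *	u_short val = htons(sum);
 *
 *	m_copyback(m, off, sizeof(val), (caddr_t)&val);
 */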
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}