FreeBSD/Linux Kernel Cross Reference
sys/kern/uipc_mbuf.c

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char    *mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int     max_linkhdr;
int     max_protohdr;
int     max_hdr;
int     max_datalen;
u_int   m_mballoc_wid = 0;
u_int   m_clalloc_wid = 0;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
           &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
           &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
           &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
           &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");

static void     m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT        1

#define NMB_INIT        16

/* ARGSUSED */
static void
mbinit(dummy)
        void *dummy;
{
        int s;

        mmbfree = NULL; mclfree = NULL;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        s = splimp();
        if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
                goto bad;
#if MCLBYTES <= PAGE_SIZE
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
                goto bad;
#else
        /* It's OK to call contigmalloc in this context. */
        if (m_clalloc(16, M_WAIT) == 0)
                goto bad;
#endif
        splx(s);
        return;
bad:
        panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
        register int nmb;
        int how;
{
        register caddr_t p;
        register int i;
        int nbytes;

        /*
         * If we've hit the mbuf limit, stop allocating (or trying to)
         * from mb_map, in order to avoid dipping into the section of
         * mb_map which we've "reserved" for clusters.
         */
        if ((nmb + mbstat.m_mbufs) > nmbufs)
                return (0);

        /*
         * Once we run out of map space, it will be impossible to get
         * any more (nothing is ever freed back to the map) -- however,
         * all is not lost: m_reclaim might still be able to free a
         * substantial amount of space.
         *
         * XXX Furthermore, we can also work with "recycled" mbufs (when
         * we're called with M_WAIT the sleep procedure will be woken
         * up when an mbuf is freed. See m_mballoc_wait()).
         */
        if (mb_map_full)
                return (0);

        nbytes = round_page(nmb * MSIZE);
        p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
        if (p == 0 && how == M_WAIT) {
                mbstat.m_wait++;
                p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
        }

        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL)
                return (0);

        nmb = nbytes / MSIZE;
        for (i = 0; i < nmb; i++) {
                ((struct mbuf *)p)->m_next = mmbfree;
                mmbfree = (struct mbuf *)p;
                p += MSIZE;
        }
        mbstat.m_mbufs += nmb;
        return (1);
}

/*
 * Once mb_map has been exhausted, and the allocation macros (or, in some
 * cases, functions) are called with M_WAIT, it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait up to a designated time
 * (mbuf_wait ticks) for an mbuf to be freed.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
        struct mbuf *p;
        int s;

        m_mballoc_wid++;
        if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
                m_mballoc_wid--;

        /*
         * Now that we (think) we've got something, we will redo the
         * MGET, but avoid getting into another instance of m_mballoc_wait().
         * XXX: We retry the fetch _even_ if the sleep timed out.  This is
         *      done on purpose, for the [unlikely] case that an mbuf was
         *      freed but the sleeper was not awakened in time.
         */
        p = NULL;
        switch (caller) {
        case MGET_C:
                MGET(p, M_DONTWAIT, type);
                break;
        case MGETHDR_C:
                MGETHDR(p, M_DONTWAIT, type);
                break;
        default:
                panic("m_mballoc_wait: invalid caller (%d)", caller);
        }

        s = splimp();
        if (p != NULL) {                /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mmbfree != NULL)
                        MMBWAKEUP();
        }
        splx(s);
        return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
        int status;

        while (1) {
                tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, M_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }
        }
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
           &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
        register int ncl;
        int how;
{
        register caddr_t p;
        register int i;
        int npg;

        /*
         * If we've hit the mcluster number limit, stop allocating (or
         * trying to) from mb_map, in order to avoid dipping into the
         * section of mb_map which we've "reserved" for mbufs.
         */
        if ((ncl + mbstat.m_clusters) > nmbclusters) {
                mbstat.m_drops++;
                return (0);
        }

        /*
         * Once we run out of map space, it will be impossible
         * to get any more (nothing is ever freed back to the
         * map). From this point on, we solely rely on freed
         * mclusters.
         */
        if (mb_map_full) {
                mbstat.m_drops++;
                return (0);
        }

#if MCLBYTES > PAGE_SIZE
        if (how != M_WAIT) {
                i_want_my_mcl += ncl;
                wakeup(&i_want_my_mcl);
                mbstat.m_wait++;
                p = 0;
        } else {
                p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
                                  ~0ul, PAGE_SIZE, 0, mb_map);
        }
#else
        npg = ncl;
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
                                 how != M_WAIT ? M_NOWAIT : M_WAITOK);
        ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL) {
                mbstat.m_drops++;
                return (0);
        }

        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;
        return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
        caddr_t p;
        int s;

#ifdef __i386__
        /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
        KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
#endif

        /* Sleep until something's available or until we expire. */
        m_clalloc_wid++;
        if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) we've got something, we will redo the
         * MCLALLOC, but avoid getting into another instance of
         * m_clalloc_wait().
         */
        p = NULL;
        MCLALLOC(p, M_DONTWAIT);

        s = splimp();
        if (p != NULL) {        /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mclfree != NULL)
                        MCLWAKEUP();
        }

        splx(s);
        return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
#ifdef __i386__
                KASSERT(intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
                m_reclaim();
        }

        /*
         * Both m_mballoc_wait and m_retry must be nulled because
         * when the MGET macro is run from here, we definitely do _not_
         * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
         */
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retry(i, t)   (struct mbuf *)0
        MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
#ifdef __i386__
                KASSERT(intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
#endif
                m_reclaim();
        }

#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retryhdr(i, t) (struct mbuf *)0
        MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}

static void
m_reclaim()
{
        register struct domain *dp;
        register struct protosw *pr;
        int s = splimp();

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
        splx(s);
        mbstat.m_drain++;
}
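
/*
 * The drain hooks walked above are supplied by the protocols themselves:
 * a protocol exports a drain routine and points pr_drain at it in its
 * protosw entry, so m_reclaim() can ask it to release cached mbufs.
 * A hedged sketch (names from netinet; the table layout is illustrative,
 * the real protosw tables are positional initializers in the protocol
 * code):
 *
 *      void ip_drain(void);            -- frees IP reassembly queues
 *
 *      -- in inetsw[], the raw-IP entry carries:
 *      --      pr_drain = ip_drain
 */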

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        return (m);
}

struct mbuf *
m_gethdr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGETHDR(m, how, type);
        return (m);
}

struct mbuf *
m_getclr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

struct mbuf *
m_free(m)
        struct mbuf *m;
{
        register struct mbuf *n;

        MFREE(m, n);
        return (n);
}

void
m_freem(m)
        register struct mbuf *m;
{
        register struct mbuf *n;

        if (m == NULL)
                return;
        do {
                MFREE(m, n);
                m = n;
        } while (m);
}
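
/*
 * Usage sketch (a typical caller pattern, not part of this file, with
 * `len' assumed): allocate a packet-header mbuf, pull in a cluster if
 * the payload will not fit in the internal data area, and free the
 * whole chain on any error.
 *
 *      struct mbuf *m;
 *
 *      MGETHDR(m, M_DONTWAIT, MT_DATA);        -- or m_gethdr(...)
 *      if (m == NULL)
 *              return (ENOBUFS);
 *      if (len > MHLEN) {
 *              MCLGET(m, M_DONTWAIT);
 *              if ((m->m_flags & M_EXT) == 0) {
 *                      m_freem(m);             -- frees the entire chain
 *                      return (ENOBUFS);
 *              }
 *      }
 *      m->m_len = m->m_pkthdr.len = len;
 */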

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
        register struct mbuf *m;
        int len, how;
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR) {
                M_COPY_PKTHDR(mn, m);
                m->m_flags &= ~M_PKTHDR;
        }
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}
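
/*
 * Callers normally go through the M_PREPEND() macro (sys/mbuf.h), which
 * uses leading space in the first mbuf when there is room and only falls
 * back to this routine otherwise.  A hedged sketch of the common
 * pattern, e.g. prepending an IP header:
 *
 *      M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *      if (m == NULL)                  -- chain was freed on failure
 *              return (ENOBUFS);
 *      ip = mtod(m, struct ip *);
 */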

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        if (off < 0 || len < 0)
                panic("m_copym");
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                if (m == 0)
                        panic("m_copym");
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        if (len != M_COPYALL)
                                panic("m_copym");
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        M_COPY_PKTHDR(n, m);
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        if (!m->m_ext.ext_ref)
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                        else
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                        m->m_ext.ext_size);
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else
                        bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                MCFail++;
        return (top);
nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
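
/*
 * Hedged usage sketch: TCP-style retransmission is the classic consumer,
 * taking a read-only, reference-counted copy of part of the socket send
 * buffer (variable names here are illustrative):
 *
 *      struct mbuf *n;
 *
 *      n = m_copym(so->so_snd.sb_mb, off, len, M_DONTWAIT);
 *      if (n == NULL)
 *              goto drop;              -- allocation failed
 *      -- n must be treated as read-only: cluster data is shared with m.
 */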

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);
        top = n;
        if (!n)
                goto nospace;

        M_COPY_PKTHDR(n, m);
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                if (!m->m_ext.ext_ref)
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                else
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                m->m_ext.ext_size);
                n->m_ext = m->m_ext;
                n->m_flags |= M_EXT;
        } else {
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                MGET(o, how, m->m_type);
                if (!o)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        if (!m->m_ext.ext_ref)
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                        else
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                        m->m_ext.ext_size);
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        MCFail++;
        return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        if (off < 0 || len < 0)
                panic("m_copydata");
        while (off > 0) {
                if (m == 0)
                        panic("m_copydata");
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                if (m == 0)
                        panic("m_copydata");
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
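
/*
 * Hedged sketch: copying a header out of a chain into aligned local
 * storage, without caring how the chain is fragmented (iphlen assumed):
 *
 *      struct tcphdr th;
 *
 *      m_copydata(m, iphlen, sizeof(th), (caddr_t)&th);
 *
 * The chain must actually contain off + len bytes; shorter chains panic.
 */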

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (0);
        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);
                if (n == NULL)
                        goto nospace;
                if (top == NULL) {              /* first one, must be PKTHDR */
                        M_COPY_PKTHDR(n, m);
                        nsize = MHLEN;
                } else                          /* not the first one */
                        nsize = MLEN;
                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {
                                (void)m_free(n);
                                goto nospace;
                        }
                        nsize = MCLBYTES;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                        ("%s: bogus m_pkthdr.len", __FUNCTION__));
        }
        return (top);

nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
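
/*
 * Hedged sketch of when to prefer m_dup() over m_copypacket(): any time
 * the copy will be written to, since m_copypacket() shares cluster data:
 *
 *      struct mbuf *w;
 *
 *      w = m_dup(m, M_DONTWAIT);       -- deep, writable copy
 *      if (w == NULL)
 *              return (ENOBUFS);
 *      mtod(w, struct ip *)->ip_ttl = 1;  -- safe: w shares nothing with m
 */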

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
        register struct mbuf *m, *n;
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}
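
/*
 * Hedged sketch: IP reassembly is a typical caller, appending each
 * fragment's data to the packet being rebuilt.  Because m_cat() does not
 * touch m_pkthdr.len, the caller fixes it up afterwards (total_len is an
 * assumed caller-maintained count):
 *
 *      m_cat(m, frag);                 -- frag is consumed
 *      m->m_pkthdr.len = total_len;
 */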

void
m_adj(mp, req_len)
        struct mbuf *mp;
        int req_len;
{
        register int len = req_len;
        register struct mbuf *m;
        register int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}
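
/*
 * Hedged sketch: positive lengths trim from the front, negative lengths
 * from the back, so stripping a link-level header and a short trailer
 * looks like (sizes illustrative):
 *
 *      m_adj(m, sizeof(struct ether_header));  -- drop link-level header
 *      m_adj(m, -2);                            -- drop 2-byte trailer
 */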

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        register int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, M_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR) {
                        M_COPY_PKTHDR(m, n);
                        n->m_flags &= ~M_PKTHDR;
                }
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                  (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        MPFail++;
        return (0);
}
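
/*
 * Hedged sketch: the canonical input-path idiom (as in ip_input()),
 * making sure the header can be dereferenced through mtod():
 *
 *      if (m->m_len < sizeof(struct ip) &&
 *          (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *              return;                 -- m_pullup freed the chain
 *      ip = mtod(m, struct ip *);
 */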

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else
                                return (n);
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                if (!m->m_ext.ext_ref)
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                else
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                m->m_ext.ext_size);
                m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
                n->m_data = m->m_data + len;
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}
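
/*
 * Hedged sketch: splitting an over-sized datagram at a boundary, keeping
 * the head in m and taking the tail as a new chain (mtu_payload is an
 * assumed caller-computed length):
 *
 *      struct mbuf *tail;
 *
 *      tail = m_split(m, mtu_payload, M_DONTWAIT);
 *      if (tail == NULL)
 *              goto drop;              -- m is left (nearly) intact
 */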

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
        char *buf;
        int totlen, off0;
        struct ifnet *ifp;
        void (*copy) __P((char *from, caddr_t to, u_int len));
{
        register struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        register int off = off0, len;
        register char *cp;
        char *epkt;

        cp = buf;
        epkt = cp + totlen;
        if (off) {
                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
        }
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == 0)
                return (0);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                return (0);
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);
                        else
                                len = m->m_len;
                } else {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;
                                m->m_len = len;
                        } else
                                len = m->m_len;
                }
                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = buf;
        }
        return (top);
}
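
/*
 * Hedged sketch: a driver receive path copying a frame out of board
 * memory into a fresh chain and handing it up the stack (sc, pktlen and
 * the ether_input() call shape are illustrative of drivers of this era):
 *
 *      m = m_devget(sc->rx_buf, pktlen, 0, &sc->arpcom.ac_if, NULL);
 *      if (m == NULL)
 *              return;                 -- drop: no mbufs
 *      ether_input(&sc->arpcom.ac_if, eh, m);
 */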

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
        struct  mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register int mlen;
        register struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}
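
/*
 * Hedged sketch: overwriting a field at a known offset, letting
 * m_copyback() walk (and, if needed, grow) the chain (sum_off is an
 * assumed caller-computed offset):
 *
 *      u_short sum = 0;
 *
 *      m_copyback(m, sum_off, sizeof(sum), (caddr_t)&sum);
 *
 * Note the quiet failure mode: if allocation fails mid-extension, the
 * copy is silently truncated.
 */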
