The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mbuf.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2004, 2005,
    3  *      Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_mbuf.c 174279 2007-12-05 00:47:48Z rwatson $");
   30 
   31 #include "opt_mac.h"
   32 #include "opt_param.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/mac.h>
   36 #include <sys/malloc.h>
   37 #include <sys/systm.h>
   38 #include <sys/mbuf.h>
   39 #include <sys/domain.h>
   40 #include <sys/eventhandler.h>
   41 #include <sys/kernel.h>
   42 #include <sys/protosw.h>
   43 #include <sys/smp.h>
   44 #include <sys/sysctl.h>
   45 
   46 #include <vm/vm.h>
   47 #include <vm/vm_page.h>
   48 #include <vm/uma.h>
   49 #include <vm/uma_int.h>
   50 #include <vm/uma_dbg.h>
   51 
   52 /*
   53  * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
   54  * Zones.
   55  *
   56  * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
   57  * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
   58  * administrator so desires.
   59  *
   60  * Mbufs are allocated from a UMA Master Zone called the Mbuf
   61  * Zone.
   62  *
   63  * Additionally, FreeBSD provides a Packet Zone, which it
   64  * configures as a Secondary Zone to the Mbuf Master Zone,
   65  * thus sharing backend Slab kegs with the Mbuf Master Zone.
   66  *
   67  * Thus common-case allocations and locking are simplified:
   68  *
   69  *  m_clget()                m_getcl()
   70  *    |                         |
   71  *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
   72  *    |   |             [     Packet   ]            |
   73  *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
   74  *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
   75  *        |                       \________         |
   76  *  [ Cluster Keg   ]                      \       /
   77  *        |                              [ Mbuf Keg   ]
   78  *  [ Cluster Slabs ]                         |
   79  *        |                              [ Mbuf Slabs ]
   80  *         \____________(VM)_________________/
   81  *
   82  *
   83  * Whenever an object is allocated with uma_zalloc() out of
   84  * one of the Zones its _ctor_ function is executed.  The same
   85  * for any deallocation through uma_zfree() the _dtor_ function
   86  * is executed.
   87  *
   88  * Caches are per-CPU and are filled from the Master Zone.
   89  *
   90  * Whenever an object is allocated from the underlying global
   91  * memory pool it gets pre-initialized with the _zinit_ functions.
    92  * When the Kegs are overfull, objects get decommissioned with
   93  * _zfini_ functions and free'd back to the global memory pool.
   94  *
   95  */
   96 
/*
 * Zone limits, settable via kern.ipc.* tunables/sysctls and applied to
 * the corresponding UMA zones with uma_zone_set_max() in mbuf_init().
 */
int nmbclusters;                /* limits number of mbuf clusters */
int nmbjumbop;                  /* limits number of page size jumbo clusters */
int nmbjumbo9;                  /* limits number of 9k jumbo clusters */
int nmbjumbo16;                 /* limits number of 16k jumbo clusters */
struct mbstat mbstat;           /* mbuf statistics, exported via kern.ipc.mbstat */
  102 
/*
 * Compute the default cluster limit and let the loader tunable
 * kern.ipc.nmbclusters override it.  Runs at SI_SUB_TUNABLES, i.e.
 * early in boot, before the VM system is initialized.
 */
static void
tunable_mbinit(void *dummy)
{

        /* This has to be done before VM init. */
        nmbclusters = 1024 + maxusers * 64;     /* default scales with maxusers */
        TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
  112 
  113 SYSCTL_DECL(_kern_ipc);
  114 static int
  115 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
  116 {
  117         int error, newnmbclusters;
  118 
  119         newnmbclusters = nmbclusters;
  120         error = sysctl_handle_int(oidp, &newnmbclusters, sizeof(int), req); 
  121         if (error == 0 && req->newptr) {
  122                 if (newnmbclusters > nmbclusters) {
  123                         nmbclusters = newnmbclusters;
  124                         uma_zone_set_max(zone_clust, nmbclusters);
  125                         EVENTHANDLER_INVOKE(nmbclusters_change);
  126                 } else
  127                         error = EINVAL;
  128         }
  129         return (error);
  130 }
/*
 * kern.ipc.nmbclusters goes through a handler so that raising the
 * limit also updates the cluster zone's cap (see sysctl_nmbclusters()).
 */
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");
/*
 * NOTE(review): the jumbo limits below are plain RW ints; nothing in
 * this file re-applies uma_zone_set_max() when they change at runtime,
 * so post-boot writes appear not to affect the existing zone caps --
 * confirm whether that is intended.
 */
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbop, CTLFLAG_RW, &nmbjumbop, 0,
    "Maximum number of mbuf page size jumbo clusters allowed");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo9, CTLFLAG_RW, &nmbjumbo9, 0,
    "Maximum number of mbuf 9k jumbo clusters allowed");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo16, CTLFLAG_RW, &nmbjumbo16, 0,
    "Maximum number of mbuf 16k jumbo clusters allowed");
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");
  142 
/*
 * Zones from which we allocate.  Created in mbuf_init().
 */
uma_zone_t      zone_mbuf;      /* Mbuf Master Zone (MSIZE objects) */
uma_zone_t      zone_clust;     /* 2K cluster zone (MCLBYTES objects) */
uma_zone_t      zone_pack;      /* Packet zone, secondary to zone_mbuf */
uma_zone_t      zone_jumbop;    /* page-size jumbo cluster zone */
uma_zone_t      zone_jumbo9;    /* 9k jumbo cluster zone */
uma_zone_t      zone_jumbo16;   /* 16k jumbo cluster zone */
  152 
/*
 * Local prototypes.
 */
/* UMA constructors/destructors for the mbuf, cluster and packet zones. */
static int      mb_ctor_mbuf(void *, int, void *, int);
static int      mb_ctor_clust(void *, int, void *, int);
static int      mb_ctor_pack(void *, int, void *, int);
static void     mb_dtor_mbuf(void *, int, void *);
static void     mb_dtor_clust(void *, int, void *);
static void     mb_dtor_pack(void *, int, void *);
/* Packet secondary-zone init/fini: attach/detach the backing cluster. */
static int      mb_zinit_pack(void *, int, int);
static void     mb_zfini_pack(void *, int);

static void     mb_reclaim(void *);     /* vm_lowmem protocol drain hook */
static void     mbuf_init(void *);      /* SI_SUB_MBUF initialization */

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
  170 
/*
 * Initialize FreeBSD Network buffer allocation.
 *
 * Runs at SI_SUB_MBUF, before any networking code can allocate mbufs.
 * Creates every UMA zone used by the mbuf allocator, applies the
 * boot-time limits, and registers the low-memory drain handler.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL)
static void
mbuf_init(void *dummy)
{

        /*
         * Configure UMA zones for Mbufs, Clusters, and Packets.
         */
        zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
            mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            /*
             * MSIZE - 1 is an alignment mask: mbufs stay MSIZE-aligned,
             * which dtom() depends on (see the CTASSERT above).
             */
            MSIZE - 1, UMA_ZONE_MAXBUCKET);

        zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            /* UMA_ZONE_REFCNT: clusters carry a UMA-managed refcount. */
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);

        /* Honor the tunable/default cap computed in tunable_mbinit(). */
        if (nmbclusters > 0)
                uma_zone_set_max(zone_clust, nmbclusters);

        /*
         * The Packet zone is a secondary zone on top of the Mbuf Master
         * Zone; its cached objects are mbufs with a 2K cluster already
         * attached (see mb_zinit_pack()).
         */
        zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
            mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

        /* Make jumbo frame zone too. Page size, 9k and 16k. */
        zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbop > 0)
                uma_zone_set_max(zone_jumbop, nmbjumbop);

        zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbo9 > 0)
                uma_zone_set_max(zone_jumbo9, nmbjumbo9);

        zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
            mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
            trash_init, trash_fini,
#else
            NULL, NULL,
#endif
            UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
        if (nmbjumbo16 > 0)
                uma_zone_set_max(zone_jumbo16, nmbjumbo16);

        /* uma_prealloc() goes here... */

        /*
         * Hook event handler for low-memory situation, used to
         * drain protocols and push data back to the caches (UMA
         * later pushes it back to VM).
         */
        EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
            EVENTHANDLER_PRI_FIRST);

        /*
         * [Re]set counters and local statistics knobs.
         * XXX Some of these should go and be replaced, but UMA stat
         * gathering needs to be revised.
         */
        mbstat.m_mbufs = 0;
        mbstat.m_mclusts = 0;
        mbstat.m_drain = 0;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;
        mbstat.m_numtypes = MT_NTYPES;

        mbstat.m_mcfail = mbstat.m_mpfail = 0;
        mbstat.sf_iocnt = 0;
        mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
  269 
/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 *
 * Returns 0 on success.  A MAC label allocation failure is
 * propagated back to UMA, which fails the whole allocation.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

#ifdef INVARIANTS
        trash_ctor(mem, size, arg, how);
#endif
        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

        /*
         * The mbuf is initialized later.  The caller has the
         * responsibility to set up any MAC labels too.
         */
        if (type == MT_NOINIT)
                return (0);

        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_len = 0;
        m->m_flags = flags;
        m->m_type = type;
        if (flags & M_PKTHDR) {
                /* Header mbufs use the m_pktdat data region. */
                m->m_data = m->m_pktdat;
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.len = 0;
                m->m_pkthdr.header = NULL;
                m->m_pkthdr.csum_flags = 0;
                m->m_pkthdr.csum_data = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_init_mbuf(m, how);
                if (error)
                        return (error);
#endif
        } else
                m->m_data = m->m_dat;
        mbstat.m_mbufs += 1;    /* XXX */
        return (0);
}
  327 
  328 /*
  329  * The Mbuf master zone destructor.
  330  */
  331 static void
  332 mb_dtor_mbuf(void *mem, int size, void *arg)
  333 {
  334         struct mbuf *m;
  335 
  336         m = (struct mbuf *)mem;
  337         if ((m->m_flags & M_PKTHDR) != 0)
  338                 m_tag_delete_chain(m, NULL);
  339 #ifdef INVARIANTS
  340         trash_dtor(mem, size, arg);
  341 #endif
  342         mbstat.m_mbufs -= 1;    /* XXX */
  343 }
  344 
/*
 * The Mbuf Packet zone destructor.
 *
 * Runs when a packet (mbuf with its 2K cluster attached) is freed back
 * to the Packet zone cache; the cluster stays attached, so the KASSERTs
 * below verify it is still in its pristine EXT_PACKET state.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        if ((m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);

        /* Make sure we've got a clean cluster back. */
        KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
        KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
        KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
        KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__));
        KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
        KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
        trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
        mbstat.m_mbufs -= 1;    /* XXX */
        mbstat.m_mclusts -= 1;  /* XXX */
        /*
         * If there are processes blocked on zone_clust, waiting for pages to be freed up,
         * cause them to be woken up by draining the packet zone. Draining the cluster zone
         * is unnecessary here (as freeing clusters would have caused these blocked processes
         * to be woken up). We are exposed to a race here (in the check for the UMA_ZFLAG_FULL)
         * where we might miss the flag set, but that is deliberate. We don't want to acquire
         * the zone lock for every mbuf free.
         */
        if (uma_zone_exhausted_nolock(zone_clust))
                zone_drain(zone_pack);
}
  380 
/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        int type = 0;

#ifdef INVARIANTS
        trash_ctor(mem, size, arg, how);
#endif
        m = (struct mbuf *)arg;
        if (m != NULL) {
                /*
                 * This ctor serves several zones; derive the EXT_* type
                 * from the object size the zone hands us.
                 */
                switch (size) {
                case MCLBYTES:
                        type = EXT_CLUSTER;
                        break;
#if MJUMPAGESIZE != MCLBYTES    /* avoid a duplicate case label when equal */
                case MJUMPAGESIZE:
                        type = EXT_JUMBOP;
                        break;
#endif
                case MJUM9BYTES:
                        type = EXT_JUMBO9;
                        break;
                case MJUM16BYTES:
                        type = EXT_JUMBO16;
                        break;
                default:
                        panic("unknown cluster size");
                        break;
                }
                /* Attach the cluster as external storage on the mbuf. */
                m->m_ext.ext_buf = (caddr_t)mem;
                m->m_data = m->m_ext.ext_buf;
                m->m_flags |= M_EXT;
                m->m_ext.ext_free = NULL;
                m->m_ext.ext_args = NULL;
                m->m_ext.ext_size = size;
                m->m_ext.ext_type = type;
                m->m_ext.ref_cnt = NULL;        /* Lazy counter assign. */
        }
        /* Counted even when no mbuf is attached (plain cluster alloc). */
        mbstat.m_mclusts += 1;  /* XXX */
        return (0);
}
  431 
/*
 * The Mbuf Cluster zone destructor.
 *
 * Shared by the 2K cluster and all jumbo zones; only statistics
 * bookkeeping (and INVARIANTS trashing) happens here.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
        trash_dtor(mem, size, arg);
#endif
        mbstat.m_mclusts -= 1;  /* XXX */
}
  443 
/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 *
 * Attaches a 2K cluster to the mbuf so cached Packet objects are
 * ready-made mbuf+cluster pairs.  Returns ENOMEM if no cluster
 * could be attached.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
        /* uma_zalloc_arg() runs mb_ctor_clust(), wiring mem into m_ext. */
        if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
            m->m_ext.ext_buf == NULL)
                return (ENOMEM);
        m->m_ext.ext_type = EXT_PACKET; /* Override. */
#ifdef INVARIANTS
        trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
        /*
         * Undo mb_ctor_clust()'s m_mclusts increment: mb_ctor_pack()
         * re-adds it when the packet is actually handed out.
         */
        mbstat.m_mclusts -= 1;  /* XXX */
        return (0);
}
  464 
/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 *
 * Detaches and frees the cluster attached by mb_zinit_pack(),
 * leaving a bare mbuf for the Mbuf keg.
 */
static void
mb_zfini_pack(void *mem, int size)
{
        struct mbuf *m;

        m = (struct mbuf *)mem;
#ifdef INVARIANTS
        trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
        uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
        m->m_ext.ext_buf = NULL;
        /*
         * The free above ran mb_dtor_clust(), which decremented
         * m_mclusts; re-add it to balance mb_zinit_pack()'s decrement.
         */
        mbstat.m_mclusts += 1;  /* XXX */
#ifdef INVARIANTS
        trash_dtor(mem, size, NULL);
#endif
}
  485 
/*
 * The "packet" keg constructor.
 *
 * Like mb_ctor_mbuf(), but the object already carries the cluster
 * attached by mb_zinit_pack(): m_data is pointed into ext_buf and
 * M_EXT is forced on.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
        struct mbuf *m;
        struct mb_args *args;
#ifdef MAC
        int error;
#endif
        int flags;
        short type;

        m = (struct mbuf *)mem;
        args = (struct mb_args *)arg;
        flags = args->flags;
        type = args->type;

#ifdef INVARIANTS
        trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_data = m->m_ext.ext_buf;
        m->m_len = 0;
        m->m_flags = (flags | M_EXT);
        m->m_type = type;
        m->m_ext.ref_cnt = NULL;        /* Lazy counter assign. */

        if (flags & M_PKTHDR) {
                m->m_pkthdr.rcvif = NULL;
                m->m_pkthdr.len = 0;
                m->m_pkthdr.header = NULL;
                m->m_pkthdr.csum_flags = 0;
                m->m_pkthdr.csum_data = 0;
                SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
                /* If the label init fails, fail the alloc */
                error = mac_init_mbuf(m, how);
                if (error)
                        return (error);
#endif
        }
        /* m_ext is already initialized. */

        mbstat.m_mbufs += 1;    /* XXX */
        mbstat.m_mclusts += 1;  /* XXX */
        return (0);
}
  536 
/*
 * This is the protocol drain routine.  Registered on the vm_lowmem
 * event in mbuf_init() to push protocol-held data back to the caches.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
        struct domain *dp;
        struct protosw *pr;

        /* May sleep; warn (and panic) if called in a non-sleepable context. */
        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
            "mb_reclaim()");

        NET_LOCK_GIANT();
        mbstat.m_drain++;
        /*
         * Invoke every protocol's pr_drain hook in every domain.
         * dom_protoswNPROTOSW is the historically-named member that
         * points one past the end of the dom_protosw array.
         */
        for (dp = domains; dp != NULL; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain != NULL)
                                (*pr->pr_drain)();
        NET_UNLOCK_GIANT();
}

Cache object: 3d69091dc2932c05e2a1ece5e5006ed2


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.