The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/net/ifq_var.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  *
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in
   12  *    the documentation and/or other materials provided with the
   13  *    distribution.
   14  * 3. Neither the name of The DragonFly Project nor the names of its
   15  *    contributors may be used to endorse or promote products derived
   16  *    from this software without specific, prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
   22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
   24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   27  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   28  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 #ifndef _NET_IFQ_VAR_H_
   33 #define _NET_IFQ_VAR_H_
   34 
   35 #ifndef _KERNEL
   36 #error "This file should not be included by userland programs."
   37 #endif
   38 
   39 #ifndef _SYS_SYSTM_H_
   40 #include <sys/systm.h>
   41 #endif
   42 #ifndef _SYS_THREAD2_H_
   43 #include <sys/thread2.h>
   44 #endif
   45 #ifndef _SYS_SERIALIZE_H_
   46 #include <sys/serialize.h>
   47 #endif
   48 #ifndef _SYS_MBUF_H_
   49 #include <sys/mbuf.h>
   50 #endif
   51 #ifndef _NET_IF_VAR_H_
   52 #include <net/if_var.h>
   53 #endif
   54 #ifndef _NET_ALTQ_IF_ALTQ_H_
   55 #include <net/altq/if_altq.h>
   56 #endif
   57 
/*
 * Assert that _ifsq_ is _ifp_'s default subqueue.
 * NOTE(review): the expansion already ends with a ';', so callers that
 * append their own ';' create an empty statement -- confirm before using
 * this inside an unbraced if/else.
 */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
        KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
            ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
            ("not ifp's default subqueue"));
   62 
struct ifaltq;
struct ifaltq_subque;

/*
 * Subqueue watchdog
 */
typedef void    (*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
        struct callout  wd_callout;     /* timer driving the watchdog */
        int             wd_timer;       /* presumably ticks until timeout;
                                         * semantics live in ifsq_watchdog_*()
                                         * -- not visible here */
        struct ifaltq_subque *wd_subq;  /* subqueue being watched */
        ifsq_watchdog_t wd_watchdog;    /* callback invoked on timeout */
};
   77 
/*
 * Support for "classic" ALTQ interfaces, i.e. the plain FIFO queueing
 * used whenever no ALTQ discipline is enabled on the interface.
 */
int             ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
                    struct altq_pktattr *);
struct mbuf     *ifsq_classic_dequeue(struct ifaltq_subque *, int);
int             ifsq_classic_request(struct ifaltq_subque *, int, void *);
void            ifq_set_classic(struct ifaltq *);

/* Configure queue depth and install the per-subqueue method table. */
void            ifq_set_maxlen(struct ifaltq *, int);
void            ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
                    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
/* CPU -> subqueue index mappers (single-queue and mask-based variants). */
int             ifq_mapsubq_default(struct ifaltq *, int);
int             ifq_mapsubq_mask(struct ifaltq *, int);

/* Kick transmission on a subqueue (directly, or deferred/scheduled). */
void            ifsq_devstart(struct ifaltq_subque *ifsq);
void            ifsq_devstart_sched(struct ifaltq_subque *ifsq);

/* Per-subqueue transmit watchdog setup and control. */
void            ifsq_watchdog_init(struct ifsubq_watchdog *,
                    struct ifaltq_subque *, ifsq_watchdog_t);
void            ifsq_watchdog_start(struct ifsubq_watchdog *);
void            ifsq_watchdog_stop(struct ifsubq_watchdog *);

/*
 * Dispatch a packet to an interface.
 */
int             ifq_dispatch(struct ifnet *, struct mbuf *,
                    struct altq_pktattr *);
  106 
  107 #ifdef ALTQ
  108 
  109 static __inline int
  110 ifq_is_enabled(struct ifaltq *_ifq)
  111 {
  112         return(_ifq->altq_flags & ALTQF_ENABLED);
  113 }
  114 
  115 static __inline int
  116 ifq_is_attached(struct ifaltq *_ifq)
  117 {
  118         return(_ifq->altq_disc != NULL);
  119 }
  120 
  121 #else   /* !ALTQ */
  122 
  123 static __inline int
  124 ifq_is_enabled(struct ifaltq *_ifq)
  125 {
  126         return(0);
  127 }
  128 
  129 static __inline int
  130 ifq_is_attached(struct ifaltq *_ifq)
  131 {
  132         return(0);
  133 }
  134 
  135 #endif  /* ALTQ */
  136 
  137 static __inline int
  138 ifq_is_ready(struct ifaltq *_ifq)
  139 {
  140         return(_ifq->altq_flags & ALTQF_READY);
  141 }
  142 
  143 static __inline void
  144 ifq_set_ready(struct ifaltq *_ifq)
  145 {
  146         _ifq->altq_flags |= ALTQF_READY;
  147 }
  148 
/*
 * Enqueue _m_ on the subqueue, routing to the classic FIFO path when no
 * ALTQ discipline is enabled, otherwise to the installed enqueue method.
 *
 * Subqueue lock must be held.
 *
 * NOTE: with ALTQ compiled in, the dangling "else" binds to the return
 * statement after the #endif -- keep the statement shape intact.
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
        if (!ifq_is_enabled(_ifsq->ifsq_altq))
                return ifsq_classic_enqueue(_ifsq, _m, _pa);
        else
#endif
        return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}
  163 
  164 static __inline int
  165 ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
  166     struct altq_pktattr *_pa)
  167 {
  168         int _error;
  169 
  170         ALTQ_SQ_LOCK(_ifsq);
  171         _error = ifsq_enqueue_locked(_ifsq, _m, _pa);
  172         ALTQ_SQ_UNLOCK(_ifsq);
  173         return _error;
  174 }
  175 
/*
 * Dequeue the next packet from the subqueue.
 *
 * A previously prepended mbuf (see ifsq_prepend()) always has priority
 * and bypasses both the token bucket regulator and the discipline; its
 * length is subtracted from the subqueue counters here to balance the
 * ALTQ_SQ_CNTR_INC done at prepend time.
 */
static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
        struct mbuf *_m;

        ALTQ_SQ_LOCK(_ifsq);
        if (_ifsq->ifsq_prepended != NULL) {
                _m = _ifsq->ifsq_prepended;
                _ifsq->ifsq_prepended = NULL;
                ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
                ALTQ_SQ_UNLOCK(_ifsq);
                return _m;
        }

#ifdef ALTQ
        /*
         * A token bucket regulator, if installed, takes precedence;
         * otherwise fall back to classic FIFO when ALTQ is not enabled.
         * The dangling "else" binds to the line after #endif.
         */
        if (_ifsq->ifsq_altq->altq_tbr != NULL)
                _m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
        else if (!ifq_is_enabled(_ifsq->ifsq_altq))
                _m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
        else
#endif
        _m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
        ALTQ_SQ_UNLOCK(_ifsq);
        return _m;
}
  201 
/*
 * Peek at the next packet without removing it (ALTDQ_POLL), honoring the
 * same precedence as ifsq_dequeue(): prepended mbuf, then TBR, then
 * classic FIFO or the installed discipline.
 *
 * Subqueue lock must be held.
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
        if (_ifsq->ifsq_prepended != NULL)
                return _ifsq->ifsq_prepended;

#ifdef ALTQ
        /* Dangling "else" binds to the return after #endif. */
        if (_ifsq->ifsq_altq->altq_tbr != NULL)
                return tbr_dequeue(_ifsq, ALTDQ_POLL);
        else if (!ifq_is_enabled(_ifsq->ifsq_altq))
                return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
        else
#endif
        return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}
  220 
  221 static __inline struct mbuf *
  222 ifsq_poll(struct ifaltq_subque *_ifsq)
  223 {
  224         struct mbuf *_m;
  225 
  226         ALTQ_SQ_LOCK(_ifsq);
  227         _m = ifsq_poll_locked(_ifsq);
  228         ALTQ_SQ_UNLOCK(_ifsq);
  229         return _m;
  230 }
  231 
  232 static __inline int
  233 ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
  234 {
  235         struct mbuf *_m;
  236         int _len = 0;
  237 
  238         ALTQ_SQ_LOCK(_ifsq);
  239 
  240         _m = ifsq_poll_locked(_ifsq);
  241         if (_m != NULL) {
  242                 M_ASSERTPKTHDR(_m);
  243                 _len = _m->m_pkthdr.len;
  244         }
  245 
  246         ALTQ_SQ_UNLOCK(_ifsq);
  247 
  248         return _len;
  249 }
  250 
/*
 * Drop all packets queued on the subqueue: free any prepended mbuf
 * (balancing its counter increment first) and then issue ALTRQ_PURGE to
 * the classic path or the installed discipline.
 *
 * Subqueue lock must be held.
 *
 * NOTE: the dangling "else" binds to the request call after #endif.
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
        if (_ifsq->ifsq_prepended != NULL) {
                ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
                m_freem(_ifsq->ifsq_prepended);
                _ifsq->ifsq_prepended = NULL;
        }

#ifdef ALTQ
        if (!ifq_is_enabled(_ifsq->ifsq_altq))
                ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
        else
#endif
        _ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}
  270 
  271 static __inline void
  272 ifsq_purge(struct ifaltq_subque *_ifsq)
  273 {
  274         ALTQ_SQ_LOCK(_ifsq);
  275         ifsq_purge_locked(_ifsq);
  276         ALTQ_SQ_UNLOCK(_ifsq);
  277 }
  278 
  279 static __inline void
  280 ifq_lock_all(struct ifaltq *_ifq)
  281 {
  282         int _q;
  283 
  284         for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
  285                 ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
  286 }
  287 
  288 static __inline void
  289 ifq_unlock_all(struct ifaltq *_ifq)
  290 {
  291         int _q;
  292 
  293         for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
  294                 ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
  295 }
  296 
  297 /*
  298  * All of the subqueue locks must be held
  299  */
  300 static __inline void
  301 ifq_purge_all_locked(struct ifaltq *_ifq)
  302 {
  303         int _q;
  304 
  305         for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
  306                 ifsq_purge_locked(&_ifq->altq_subq[_q]);
  307 }
  308 
  309 static __inline void
  310 ifq_purge_all(struct ifaltq *_ifq)
  311 {
  312         ifq_lock_all(_ifq);
  313         ifq_purge_all_locked(_ifq);
  314         ifq_unlock_all(_ifq);
  315 }
  316 
/*
 * Let the enabled ALTQ discipline classify _m_ and record the result in
 * _pa_ for a later enqueue.  No-op when ALTQ is compiled out.
 *
 * ifq_is_enabled()/ALTQF_CLASSIFY are re-tested at each step -- once
 * unlocked (cheap early-out), and once more under the default subqueue
 * lock -- because ALTQ may be disabled concurrently; only the lock-held
 * test is authoritative for calling altq_classify.  Keep this pattern
 * byte-identical.
 */
static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
        if (ifq_is_enabled(_ifq)) {
                _pa->pattr_af = _af;
                _pa->pattr_hdr = mtod(_m, caddr_t);
                if (ifq_is_enabled(_ifq) &&
                    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
                        /* XXX default subqueue */
                        struct ifaltq_subque *_ifsq =
                            &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

                        ALTQ_SQ_LOCK(_ifsq);
                        if (ifq_is_enabled(_ifq) &&
                            (_ifq->altq_flags & ALTQF_CLASSIFY))
                                _ifq->altq_classify(_ifq, _m, _pa);
                        ALTQ_SQ_UNLOCK(_ifsq);
                }
        }
#endif
}
  340 
/*
 * Push _m_ back to the head of the subqueue so it is returned by the
 * next ifsq_dequeue()/ifsq_poll().  Only a single prepended mbuf is
 * supported at a time (asserted).  The counter increment here is
 * balanced by ALTQ_SQ_CNTR_DEC when the mbuf is later dequeued or
 * purged.
 */
static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
        ALTQ_SQ_LOCK(_ifsq);
        KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
        _ifsq->ifsq_prepended = _m;
        ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
        ALTQ_SQ_UNLOCK(_ifsq);
}
  350 
  351 /*
  352  * Subqueue hardware serializer must be held
  353  */
  354 static __inline void
  355 ifsq_set_oactive(struct ifaltq_subque *_ifsq)
  356 {
  357         _ifsq->ifsq_hw_oactive = 1;
  358 }
  359 
  360 /*
  361  * Subqueue hardware serializer must be held
  362  */
  363 static __inline void
  364 ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
  365 {
  366         _ifsq->ifsq_hw_oactive = 0;
  367 }
  368 
  369 /*
  370  * Subqueue hardware serializer must be held
  371  */
  372 static __inline int
  373 ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
  374 {
  375         return _ifsq->ifsq_hw_oactive;
  376 }
  377 
/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 *
 * On successful enqueue the output statistics are bumped and, unless
 * the hardware is already marked busy, the driver's if_start is called.
 *
 * NOTE(review): _m_->m_pkthdr.len is read *after* the enqueue; this
 * appears safe only because dequeueing requires the hw serializer held
 * here -- confirm against the driver model before relying on it.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
        struct ifaltq_subque *_ifsq;
        int _error;
        int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

        _ifsq = &_ifp->if_snd.altq_subq[_qid];

        ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
        _error = ifsq_enqueue(_ifsq, _m, _pa);
        if (_error == 0) {
                IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
                if (_m->m_flags & M_MCAST)
                        IFNET_STAT_INC(_ifp, omcasts, 1);
                if (!ifsq_is_oactive(_ifsq))
                        (*_ifp->if_start)(_ifp, _ifsq);
        }
        return(_error);
}
  405 
  406 static __inline int
  407 ifsq_is_empty(const struct ifaltq_subque *_ifsq)
  408 {
  409         return(_ifsq->ifsq_len == 0);
  410 }
  411 
/*
 * Non-zero iff a dequeue attempt would yield a packet right now.
 * With a token bucket regulator installed an actual poll is required,
 * since the TBR may withhold packets even when the queue is non-empty.
 *
 * Subqueue lock must be held.
 *
 * NOTE: the dangling "else" binds to the return after #endif.
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
        if (_ifsq->ifsq_altq->altq_tbr != NULL)
                return (ifsq_poll_locked(_ifsq) != NULL);
        else
#endif
        return !ifsq_is_empty(_ifsq);
}
  425 
  426 /*
  427  * Subqueue lock must be held
  428  */
  429 static __inline int
  430 ifsq_is_started(const struct ifaltq_subque *_ifsq)
  431 {
  432         return _ifsq->ifsq_started;
  433 }
  434 
  435 /*
  436  * Subqueue lock must be held
  437  */
  438 static __inline void
  439 ifsq_set_started(struct ifaltq_subque *_ifsq)
  440 {
  441         _ifsq->ifsq_started = 1;
  442 }
  443 
  444 /*
  445  * Subqueue lock must be held
  446  */
  447 static __inline void
  448 ifsq_clr_started(struct ifaltq_subque *_ifsq)
  449 {
  450         _ifsq->ifsq_started = 0;
  451 }
  452 
  453 static __inline struct ifsubq_stage *
  454 ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
  455 {
  456         return &_ifsq->ifsq_stage[_cpuid];
  457 }
  458 
  459 static __inline int
  460 ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
  461 {
  462         return _ifsq->ifsq_cpuid;
  463 }
  464 
  465 static __inline void
  466 ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
  467 {
  468         KASSERT(_cpuid >= 0 && _cpuid < ncpus,
  469             ("invalid ifsq_cpuid %d", _cpuid));
  470         _ifsq->ifsq_cpuid = _cpuid;
  471 }
  472 
  473 static __inline struct lwkt_msg *
  474 ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
  475 {
  476         return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
  477 }
  478 
  479 static __inline int
  480 ifsq_get_index(const struct ifaltq_subque *_ifsq)
  481 {
  482         return _ifsq->ifsq_index;
  483 }
  484 
  485 static __inline void
  486 ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
  487 {
  488         _ifsq->ifsq_hw_priv = _priv;
  489 }
  490 
  491 static __inline void *
  492 ifsq_get_priv(const struct ifaltq_subque *_ifsq)
  493 {
  494         return _ifsq->ifsq_hw_priv;
  495 }
  496 
  497 static __inline struct ifnet *
  498 ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
  499 {
  500         return _ifsq->ifsq_ifp;
  501 }
  502 
  503 static __inline void
  504 ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
  505     struct lwkt_serialize *_hwslz)
  506 {
  507         KASSERT(_hwslz != NULL, ("NULL hw serialize"));
  508         KASSERT(_ifsq->ifsq_hw_serialize == NULL,
  509             ("hw serialize has been setup"));
  510         _ifsq->ifsq_hw_serialize = _hwslz;
  511 }
  512 
  513 static __inline void
  514 ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
  515 {
  516         lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
  517 }
  518 
  519 static __inline void
  520 ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
  521 {
  522         lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
  523 }
  524 
  525 static __inline int
  526 ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
  527 {
  528         return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
  529 }
  530 
  531 static __inline struct ifaltq_subque *
  532 ifq_get_subq_default(const struct ifaltq *_ifq)
  533 {
  534         return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
  535 }
  536 
  537 static __inline struct ifaltq_subque *
  538 ifq_get_subq(const struct ifaltq *_ifq, int _idx)
  539 {
  540         KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
  541             ("invalid qid %d", _idx));
  542         return &_ifq->altq_subq[_idx];
  543 }
  544 
  545 static __inline struct ifaltq_subque *
  546 ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
  547 { 
  548         int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
  549         return ifq_get_subq(_ifq, _idx);
  550 }
  551 
  552 static __inline void
  553 ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
  554 {
  555         _ifq->altq_subq_cnt = _cnt;
  556 }
  557 
  558 static __inline void
  559 ifq_set_subq_mask(struct ifaltq *_ifq, uint32_t _mask)
  560 {
  561         KASSERT(((_mask + 1) & _mask) == 0, ("invalid mask %08x", _mask));
  562         _ifq->altq_subq_mask = _mask;
  563 }
  564 
  565 /* COMPAT */
  566 static __inline int
  567 ifq_is_oactive(const struct ifaltq *_ifq)
  568 {
  569         return ifsq_is_oactive(ifq_get_subq_default(_ifq));
  570 }
  571 
  572 /* COMPAT */
  573 static __inline void
  574 ifq_set_oactive(struct ifaltq *_ifq)
  575 {
  576         ifsq_set_oactive(ifq_get_subq_default(_ifq));
  577 }
  578 
  579 /* COMPAT */
  580 static __inline void
  581 ifq_clr_oactive(struct ifaltq *_ifq)
  582 {
  583         ifsq_clr_oactive(ifq_get_subq_default(_ifq));
  584 }
  585 
  586 /* COMPAT */
  587 static __inline int
  588 ifq_is_empty(struct ifaltq *_ifq)
  589 {
  590         return ifsq_is_empty(ifq_get_subq_default(_ifq));
  591 }
  592 
  593 /* COMPAT */
  594 static __inline void
  595 ifq_purge(struct ifaltq *_ifq)
  596 {
  597         ifsq_purge(ifq_get_subq_default(_ifq));
  598 }
  599 
  600 /* COMPAT */
  601 static __inline struct mbuf *
  602 ifq_dequeue(struct ifaltq *_ifq)
  603 {
  604         return ifsq_dequeue(ifq_get_subq_default(_ifq));
  605 }
  606 
  607 /* COMPAT */
  608 static __inline void
  609 ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
  610 {
  611         ifsq_prepend(ifq_get_subq_default(_ifq), _m);
  612 }
  613 
  614 /* COMPAT */
  615 static __inline void
  616 ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
  617 {
  618         KASSERT(_ifq->altq_subq_cnt == 1,
  619             ("invalid subqueue count %d", _ifq->altq_subq_cnt));
  620         ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
  621 }
  622 
  623 /* COMPAT */
  624 static __inline void
  625 ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
  626 {
  627         KASSERT(_ifq->altq_subq_cnt == 1,
  628             ("invalid subqueue count %d", _ifq->altq_subq_cnt));
  629         ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
  630 }
  631 
  632 #endif  /* _NET_IFQ_VAR_H_ */

Cache object: 566a9ac760943d79cfc74ccb63fc7397


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.