
FreeBSD/Linux Kernel Cross Reference
sys/netinet/ip_dummynet.c


    1 /*-
    2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
    3  * Portions Copyright (c) 2000 Akamba Corp.
    4  * All rights reserved
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #define DUMMYNET_DEBUG
   32 
   33 #include "opt_inet6.h"
   34 
   35 /*
   36  * This module implements IP dummynet, a bandwidth limiter/delay emulator
   37  * used in conjunction with the ipfw package.
   38  * Description of the data structures used is in ip_dummynet.h
   39  * Here you mainly find the following blocks of code:
   40  *  + variable declarations;
   41  *  + heap management functions;
   42  *  + scheduler and dummynet functions;
   43  *  + configuration and initialization.
   44  *
   45  * NOTA BENE: critical sections are protected by the "dummynet lock".
   46  *
   47  * Most important Changes:
   48  *
   49  * 011004: KLDable
   50  * 010124: Fixed WF2Q behaviour
   51  * 010122: Fixed spl protection.
   52  * 000601: WF2Q support
   53  * 000106: large rewrite, use heaps to handle very many pipes.
   54  * 980513:      initial release
   55  *
   56  * include files marked with XXX are probably not needed
   57  */
   58 
   59 #include <sys/limits.h>
   60 #include <sys/param.h>
   61 #include <sys/systm.h>
   62 #include <sys/malloc.h>
   63 #include <sys/mbuf.h>
   64 #include <sys/kernel.h>
   65 #include <sys/lock.h>
   66 #include <sys/module.h>
   67 #include <sys/mutex.h>
   68 #include <sys/priv.h>
   69 #include <sys/proc.h>
   70 #include <sys/socket.h>
   71 #include <sys/socketvar.h>
   72 #include <sys/time.h>
   73 #include <sys/sysctl.h>
   74 #include <sys/taskqueue.h>
   75 #include <net/if.h>     /* IFNAMSIZ, struct ifaddr, ifq head */
   76 #include <net/netisr.h>
   77 #include <netinet/in.h>
   78 #include <netinet/ip.h> /* ip_len, ip_off */
   79 #include <netinet/ip_fw.h>
   80 #include <netinet/ip_dummynet.h>
   81 #include <netinet/ip_var.h>     /* ip_output(), IP_FORWARDING */
   82 
   83 #include <netinet/if_ether.h> /* various ether_* routines */
   84 
   85 #include <netinet/ip6.h>       /* for ip6_input, ip6_output prototypes */
   86 #include <netinet6/ip6_var.h>
   87 
   88 /*
   89  * We keep a private variable for the simulation time, but we could
   90  * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
   91  */
   92 static dn_key curr_time = 0 ; /* current simulation time */
   93 
   94 static int dn_hash_size = 64 ;  /* default hash size */
   95 
   96 /* statistics on number of queue searches and search steps */
   97 static long searches, search_steps ;
   98 static int pipe_expire = 1 ;   /* expire queue if empty */
   99 static int dn_max_ratio = 16 ; /* max queues/buckets ratio */
  100 
  101 static long pipe_slot_limit = 100; /* Foot shooting limit for pipe queues. */
  102 static long pipe_byte_limit = 1024 * 1024;
  103 
  104 static int red_lookup_depth = 256;      /* RED - default lookup table depth */
  105 static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
  106 static int red_max_pkt_size = 1500;     /* RED - default max packet size */
  107 
  108 static struct timeval prev_t, t;
  109 static long tick_last;                  /* Last tick duration (usec). */
  110 static long tick_delta;                 /* Last vs standard tick diff (usec). */
  111 static long tick_delta_sum;             /* Accumulated tick difference (usec).*/
  112 static long tick_adjustment;            /* Tick adjustments done. */
   113 static long tick_lost;                  /* Number of lost (coalesced) ticks. */
  114 /* Adjusted vs non-adjusted curr_time difference (ticks). */
  115 static long tick_diff;
  116 
  117 static int              io_fast;
  118 static unsigned long    io_pkt;
  119 static unsigned long    io_pkt_fast;
  120 static unsigned long    io_pkt_drop;
  121 
  122 /*
  123  * Three heaps contain queues and pipes that the scheduler handles:
  124  *
  125  * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
  126  *
   127  * wfq_ready_heap contains the pipes associated with WF2Q flows.
  128  *
  129  * extract_heap contains pipes associated with delay lines.
  130  *
  131  */
  132 
  133 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
  134 
  135 static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;
  136 
  137 static int      heap_init(struct dn_heap *h, int size);
  138 static int      heap_insert (struct dn_heap *h, dn_key key1, void *p);
  139 static void     heap_extract(struct dn_heap *h, void *obj);
  140 static void     transmit_event(struct dn_pipe *pipe, struct mbuf **head,
  141                     struct mbuf **tail);
  142 static void     ready_event(struct dn_flow_queue *q, struct mbuf **head,
  143                     struct mbuf **tail);
  144 static void     ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
  145                     struct mbuf **tail);
  146 
  147 #define HASHSIZE        16
  148 #define HASH(num)       ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
  149 static struct dn_pipe_head      pipehash[HASHSIZE];     /* all pipes */
  150 static struct dn_flow_set_head  flowsethash[HASHSIZE];  /* all flowsets */
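
/*
 * Editor's note: a hedged worked example of the HASH() macro above, with
 * hypothetical pipe numbers.  HASH() folds a pipe/flowset number into 4
 * bits, e.g. HASH(1) == 1 and HASH(0x123) == (0x001 ^ 0x012 ^ 0x123) & 0x0f
 * == 0, so distinct numbers may share one of the 16 SLIST chains.
 */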
  151 
  152 static struct callout dn_timeout;
  153 
  154 extern  void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
  155 
  156 #ifdef SYSCTL_NODE
  157 SYSCTL_DECL(_net_inet);
  158 SYSCTL_DECL(_net_inet_ip);
  159 
  160 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
  161 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
  162     CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
  163 #if 0 /* curr_time is 64 bit */
  164 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, curr_time,
  165     CTLFLAG_RD, &curr_time, 0, "Current tick");
  166 #endif
  167 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
  168     CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
  169 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
  170     CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
  171 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, searches,
  172     CTLFLAG_RD, &searches, 0, "Number of queue searches");
  173 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, search_steps,
  174     CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
  175 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
  176     CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
  177 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
  178     CTLFLAG_RW, &dn_max_ratio, 0,
  179     "Max ratio between dynamic queues and buckets");
  180 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
  181     CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
  182 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
  183     CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
  184 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
  185     CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
  186 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
  187     CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
  188 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
  189     CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
  190 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
  191     CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
  192 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
  193     CTLFLAG_RD, &tick_diff, 0,
  194     "Adjusted vs non-adjusted curr_time difference (ticks).");
  195 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
  196     CTLFLAG_RD, &tick_lost, 0,
  197     "Number of ticks coalesced by dummynet taskqueue.");
  198 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
  199     CTLFLAG_RW, &io_fast, 0, "Enable fast dummynet io.");
  200 SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
  201     CTLFLAG_RD, &io_pkt, 0,
  202     "Number of packets passed to dummynet.");
  203 SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
  204     CTLFLAG_RD, &io_pkt_fast, 0,
  205     "Number of packets bypassed dummynet scheduler.");
  206 SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
  207     CTLFLAG_RD, &io_pkt_drop, 0,
  208     "Number of packets dropped by dummynet.");
  209 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
  210     CTLFLAG_RW, &pipe_slot_limit, 0, "Upper limit in slots for pipe queue.");
  211 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
  212     CTLFLAG_RW, &pipe_byte_limit, 0, "Upper limit in bytes for pipe queue.");
  213 #endif
  214 
  215 #ifdef DUMMYNET_DEBUG
  216 int     dummynet_debug = 0;
  217 #ifdef SYSCTL_NODE
  218 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug,
  219             0, "control debugging printfs");
  220 #endif
  221 #define DPRINTF(X)      if (dummynet_debug) printf X
  222 #else
  223 #define DPRINTF(X)
  224 #endif
  225 
  226 static struct task      dn_task;
  227 static struct taskqueue *dn_tq = NULL;
  228 static void dummynet_task(void *, int);
  229 
  230 static struct mtx dummynet_mtx;
  231 #define DUMMYNET_LOCK_INIT() \
  232         mtx_init(&dummynet_mtx, "dummynet", NULL, MTX_DEF)
  233 #define DUMMYNET_LOCK_DESTROY() mtx_destroy(&dummynet_mtx)
  234 #define DUMMYNET_LOCK()         mtx_lock(&dummynet_mtx)
  235 #define DUMMYNET_UNLOCK()       mtx_unlock(&dummynet_mtx)
  236 #define DUMMYNET_LOCK_ASSERT()  mtx_assert(&dummynet_mtx, MA_OWNED)
  237 
  238 static int      config_pipe(struct dn_pipe *p);
  239 static int      ip_dn_ctl(struct sockopt *sopt);
  240 
  241 static void     dummynet(void *);
  242 static void     dummynet_flush(void);
  243 static void     dummynet_send(struct mbuf *);
  244 void            dummynet_drain(void);
  245 static ip_dn_io_t dummynet_io;
  246 static void     dn_rule_delete(void *);
  247 
  248 /*
  249  * Heap management functions.
  250  *
   251  * In the heap, the first node is element 0. Children of i are 2i+1 and 2i+2.
   252  * Some macros help find the parent/children so the code can be optimized.
   253  *
   254  * heap_init() is called to expand the heap when needed.
   255  * The size is incremented in blocks of 16 entries.
   256  * XXX failure to allocate a new element is a pretty bad failure
   257  * as we basically stall a whole queue forever!!
   258  * Returns 1 on error, 0 on success.
  259  */
  260 #define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
  261 #define HEAP_LEFT(x) ( 2*(x) + 1 )
  262 #define HEAP_IS_LEFT(x) ( (x) & 1 )
  263 #define HEAP_RIGHT(x) ( 2*(x) + 2 )
  264 #define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
  265 #define HEAP_INCREMENT  15
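
/*
 * Editor's note (hypothetical indices): with the root at element 0, the
 * node at index 5 has HEAP_FATHER(5) == 2, HEAP_LEFT(5) == 11 and
 * HEAP_RIGHT(5) == 12; HEAP_IS_LEFT(5) is true, since odd indices are
 * always left children.
 */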
  266 
  267 static int
  268 heap_init(struct dn_heap *h, int new_size)
  269 {
  270     struct dn_heap_entry *p;
  271 
  272     if (h->size >= new_size ) {
  273         printf("dummynet: %s, Bogus call, have %d want %d\n", __func__,
  274                 h->size, new_size);
  275         return 0 ;
  276     }
  277     new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
  278     p = malloc(new_size * sizeof(*p), M_DUMMYNET, M_NOWAIT);
  279     if (p == NULL) {
  280         printf("dummynet: %s, resize %d failed\n", __func__, new_size );
  281         return 1 ; /* error */
  282     }
  283     if (h->size > 0) {
  284         bcopy(h->p, p, h->size * sizeof(*p) );
  285         free(h->p, M_DUMMYNET);
  286     }
  287     h->p = p ;
  288     h->size = new_size ;
  289     return 0 ;
  290 }
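
/*
 * Editor's note: the rounding above grows the array in blocks of 16
 * entries, e.g. a request for 17 slots allocates (17 + 15) & ~15 == 32,
 * so a sequence of single-element inserts does not reallocate every time.
 */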
  291 
  292 /*
   293  * Insert element in heap. Normally, p != NULL, and we insert p in
   294  * a new position and bubble up. If p == NULL, then the element is
   295  * already in place, and key1 is the position where to start the
   296  * bubble-up.
  297  * Returns 1 on failure (cannot allocate new heap entry)
  298  *
  299  * If offset > 0 the position (index, int) of the element in the heap is
  300  * also stored in the element itself at the given offset in bytes.
  301  */
  302 #define SET_OFFSET(heap, node) \
  303     if (heap->offset > 0) \
  304             *((int *)((char *)(heap->p[node].object) + heap->offset)) = node ;
  305 /*
  306  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
  307  */
  308 #define RESET_OFFSET(heap, node) \
  309     if (heap->offset > 0) \
  310             *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
  311 static int
  312 heap_insert(struct dn_heap *h, dn_key key1, void *p)
  313 {
  314     int son = h->elements ;
  315 
  316     if (p == NULL)      /* data already there, set starting point */
  317         son = key1 ;
  318     else {              /* insert new element at the end, possibly resize */
  319         son = h->elements ;
  320         if (son == h->size) /* need resize... */
  321             if (heap_init(h, h->elements+1) )
  322                 return 1 ; /* failure... */
  323         h->p[son].object = p ;
  324         h->p[son].key = key1 ;
  325         h->elements++ ;
  326     }
  327     while (son > 0) {                           /* bubble up */
  328         int father = HEAP_FATHER(son) ;
  329         struct dn_heap_entry tmp  ;
  330 
  331         if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
  332             break ; /* found right position */
  333         /* son smaller than father, swap and repeat */
  334         HEAP_SWAP(h->p[son], h->p[father], tmp) ;
  335         SET_OFFSET(h, son);
  336         son = father ;
  337     }
  338     SET_OFFSET(h, son);
  339     return 0 ;
  340 }
  341 
  342 /*
  343  * remove top element from heap, or obj if obj != NULL
  344  */
  345 static void
  346 heap_extract(struct dn_heap *h, void *obj)
  347 {
  348     int child, father, max = h->elements - 1 ;
  349 
  350     if (max < 0) {
   351         printf("dummynet: warning, extract from empty heap %p\n", h);
  352         return ;
  353     }
  354     father = 0 ; /* default: move up smallest child */
  355     if (obj != NULL) { /* extract specific element, index is at offset */
  356         if (h->offset <= 0)
  357             panic("dummynet: heap_extract from middle not supported on this heap!!!\n");
  358         father = *((int *)((char *)obj + h->offset)) ;
  359         if (father < 0 || father >= h->elements) {
  360             printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
  361                 father, h->elements);
  362             panic("dummynet: heap_extract");
  363         }
  364     }
  365     RESET_OFFSET(h, father);
  366     child = HEAP_LEFT(father) ;         /* left child */
  367     while (child <= max) {              /* valid entry */
  368         if (child != max && DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
  369             child = child+1 ;           /* take right child, otherwise left */
  370         h->p[father] = h->p[child] ;
  371         SET_OFFSET(h, father);
  372         father = child ;
  373         child = HEAP_LEFT(child) ;   /* left child for next loop */
  374     }
  375     h->elements-- ;
  376     if (father != max) {
  377         /*
  378          * Fill hole with last entry and bubble up, reusing the insert code
  379          */
  380         h->p[father] = h->p[max] ;
  381         heap_insert(h, father, NULL); /* this one cannot fail */
  382     }
  383 }
  384 
  385 #if 0
  386 /*
  387  * change object position and update references
  388  * XXX this one is never used!
  389  */
  390 static void
  391 heap_move(struct dn_heap *h, dn_key new_key, void *object)
  392 {
  393     int temp;
  394     int i ;
  395     int max = h->elements-1 ;
  396     struct dn_heap_entry buf ;
  397 
  398     if (h->offset <= 0)
  399         panic("cannot move items on this heap");
  400 
  401     i = *((int *)((char *)object + h->offset));
  402     if (DN_KEY_LT(new_key, h->p[i].key) ) { /* must move up */
  403         h->p[i].key = new_key ;
  404         for (; i>0 && DN_KEY_LT(new_key, h->p[(temp = HEAP_FATHER(i))].key) ;
  405                  i = temp ) { /* bubble up */
  406             HEAP_SWAP(h->p[i], h->p[temp], buf) ;
  407             SET_OFFSET(h, i);
  408         }
  409     } else {            /* must move down */
  410         h->p[i].key = new_key ;
  411         while ( (temp = HEAP_LEFT(i)) <= max ) { /* found left child */
  412             if ((temp != max) && DN_KEY_GT(h->p[temp].key, h->p[temp+1].key))
  413                 temp++ ; /* select child with min key */
  414             if (DN_KEY_GT(new_key, h->p[temp].key)) { /* go down */
  415                 HEAP_SWAP(h->p[i], h->p[temp], buf) ;
  416                 SET_OFFSET(h, i);
  417             } else
  418                 break ;
  419             i = temp ;
  420         }
  421     }
  422     SET_OFFSET(h, i);
  423 }
  424 #endif /* heap_move, unused */
  425 
  426 /*
  427  * heapify() will reorganize data inside an array to maintain the
  428  * heap property. It is needed when we delete a bunch of entries.
  429  */
  430 static void
  431 heapify(struct dn_heap *h)
  432 {
  433     int i ;
  434 
  435     for (i = 0 ; i < h->elements ; i++ )
  436         heap_insert(h, i , NULL) ;
  437 }
  438 
  439 /*
  440  * cleanup the heap and free data structure
  441  */
  442 static void
  443 heap_free(struct dn_heap *h)
  444 {
  445     if (h->size >0 )
  446         free(h->p, M_DUMMYNET);
  447     bzero(h, sizeof(*h) );
  448 }
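
#if 0
/*
 * Editor's sketch (never compiled): minimal use of the heap API above.
 * Keys are dn_key values; extracting an element from the middle requires
 * h->offset to point at an int field inside the stored objects, which
 * the heaps in this file arrange via SET_OFFSET()/RESET_OFFSET().
 */
static void
heap_usage_example(void)
{
	struct dn_heap h;
	static int cookie[2];

	bzero(&h, sizeof(h));			/* empty heap, offset == 0 */
	heap_insert(&h, 20, &cookie[1]);	/* may call heap_init() */
	heap_insert(&h, 10, &cookie[0]);
	/* h.p[0].key is now 10: the smallest key is always at the root. */
	heap_extract(&h, NULL);			/* removes the root (key 10) */
	heap_free(&h);
}
#endif /* editor's example */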
  449 
  450 /*
  451  * --- end of heap management functions ---
  452  */
  453 
  454 /*
  455  * Return the mbuf tag holding the dummynet state.  As an optimization
  456  * this is assumed to be the first tag on the list.  If this turns out
  457  * wrong we'll need to search the list.
  458  */
  459 static struct dn_pkt_tag *
  460 dn_tag_get(struct mbuf *m)
  461 {
  462     struct m_tag *mtag = m_tag_first(m);
  463     KASSERT(mtag != NULL &&
  464             mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
  465             mtag->m_tag_id == PACKET_TAG_DUMMYNET,
  466             ("packet on dummynet queue w/o dummynet tag!"));
  467     return (struct dn_pkt_tag *)(mtag+1);
  468 }
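
/*
 * Editor's note: the tag is allocated in dummynet_io() below with
 * m_tag_get(PACKET_TAG_DUMMYNET, sizeof(struct dn_pkt_tag), ...) and
 * attached with m_tag_prepend(), so the dn_pkt_tag sits immediately
 * after the m_tag header; that is why (mtag + 1) recovers it here.
 */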
  469 
  470 /*
  471  * Scheduler functions:
  472  *
  473  * transmit_event() is called when the delay-line needs to enter
  474  * the scheduler, either because of existing pkts getting ready,
  475  * or new packets entering the queue. The event handled is the delivery
  476  * time of the packet.
  477  *
  478  * ready_event() does something similar with fixed-rate queues, and the
  479  * event handled is the finish time of the head pkt.
  480  *
  481  * wfq_ready_event() does something similar with WF2Q queues, and the
  482  * event handled is the start time of the head pkt.
  483  *
  484  * In all cases, we make sure that the data structures are consistent
  485  * before passing pkts out, because this might trigger recursive
  486  * invocations of the procedures.
  487  */
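/*
 * Editor's note: the head/tail arguments implement a simple mbuf batch.
 * Callers initialize both to NULL, each routine appends ready packets
 * through (*tail)->m_nextpkt, and dummynet_task() hands the whole chain
 * to dummynet_send() only after dropping the dummynet lock.
 */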
  488 static void
  489 transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
  490 {
  491         struct mbuf *m;
  492         struct dn_pkt_tag *pkt;
  493 
  494         DUMMYNET_LOCK_ASSERT();
  495 
  496         while ((m = pipe->head) != NULL) {
  497                 pkt = dn_tag_get(m);
  498                 if (!DN_KEY_LEQ(pkt->output_time, curr_time))
  499                         break;
  500 
  501                 pipe->head = m->m_nextpkt;
  502                 if (*tail != NULL)
  503                         (*tail)->m_nextpkt = m;
  504                 else
  505                         *head = m;
  506                 *tail = m;
  507         }
  508         if (*tail != NULL)
  509                 (*tail)->m_nextpkt = NULL;
  510 
  511         /* If there are leftover packets, put into the heap for next event. */
  512         if ((m = pipe->head) != NULL) {
  513                 pkt = dn_tag_get(m);
  514                 /*
  515                  * XXX Should check errors on heap_insert, by draining the
  516                  * whole pipe p and hoping in the future we are more successful.
  517                  */
  518                 heap_insert(&extract_heap, pkt->output_time, pipe);
  519         }
  520 }
  521 
  522 /*
  523  * the following macro computes how many ticks we have to wait
  524  * before being able to transmit a packet. The credit is taken from
  525  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
  526  */
   527 #define SET_TICKS(_m, q, p)     \
   528     (((_m)->m_pkthdr.len * 8 * hz - (q)->numbytes + (p)->bandwidth - 1) / \
   529     (p)->bandwidth)
  530 
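/*
 * Editor's worked example (hypothetical numbers): with hz = 1000, a
 * 1500-byte packet on a 1 Mbit/s pipe with no accumulated credit needs
 * (1500 * 8 * 1000 + 1000000 - 1) / 1000000 == 12 ticks, i.e. 12 ms,
 * matching the wire time of the packet.
 */
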
  531 /*
  532  * extract pkt from queue, compute output time (could be now)
  533  * and put into delay line (p_queue)
  534  */
  535 static void
  536 move_pkt(struct mbuf *pkt, struct dn_flow_queue *q, struct dn_pipe *p,
  537     int len)
  538 {
  539     struct dn_pkt_tag *dt = dn_tag_get(pkt);
  540 
  541     q->head = pkt->m_nextpkt ;
  542     q->len-- ;
  543     q->len_bytes -= len ;
  544 
  545     dt->output_time = curr_time + p->delay ;
  546 
  547     if (p->head == NULL)
  548         p->head = pkt;
  549     else
  550         p->tail->m_nextpkt = pkt;
  551     p->tail = pkt;
  552     p->tail->m_nextpkt = NULL;
  553 }
  554 
  555 /*
  556  * ready_event() is invoked every time the queue must enter the
  557  * scheduler, either because the first packet arrives, or because
  558  * a previously scheduled event fired.
   559  * On invocation, drain as many pkts as possible (could be 0) and then
  560  * if there are leftover packets reinsert the pkt in the scheduler.
  561  */
  562 static void
  563 ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
  564 {
  565         struct mbuf *pkt;
  566         struct dn_pipe *p = q->fs->pipe;
  567         int p_was_empty;
  568 
  569         DUMMYNET_LOCK_ASSERT();
  570 
  571         if (p == NULL) {
  572                 printf("dummynet: ready_event- pipe is gone\n");
  573                 return;
  574         }
  575         p_was_empty = (p->head == NULL);
  576 
  577         /*
  578          * Schedule fixed-rate queues linked to this pipe:
  579          * account for the bw accumulated since last scheduling, then
  580          * drain as many pkts as allowed by q->numbytes and move to
  581          * the delay line (in p) computing output time.
  582          * bandwidth==0 (no limit) means we can drain the whole queue,
  583          * setting len_scaled = 0 does the job.
  584          */
  585         q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
  586         while ((pkt = q->head) != NULL) {
  587                 int len = pkt->m_pkthdr.len;
  588                 int len_scaled = p->bandwidth ? len * 8 * hz : 0;
  589 
  590                 if (len_scaled > q->numbytes)
  591                         break;
  592                 q->numbytes -= len_scaled;
  593                 move_pkt(pkt, q, p, len);
  594         }
  595         /*
  596          * If we have more packets queued, schedule next ready event
  597          * (can only occur when bandwidth != 0, otherwise we would have
  598          * flushed the whole queue in the previous loop).
  599          * To this purpose we record the current time and compute how many
  600          * ticks to go for the finish time of the packet.
  601          */
  602         if ((pkt = q->head) != NULL) {  /* this implies bandwidth != 0 */
  603                 dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
  604 
  605                 q->sched_time = curr_time;
  606                 heap_insert(&ready_heap, curr_time + t, (void *)q);
  607                 /*
  608                  * XXX Should check errors on heap_insert, and drain the whole
  609                  * queue on error hoping next time we are luckier.
  610                  */
  611         } else          /* RED needs to know when the queue becomes empty. */
  612                 q->q_time = curr_time;
  613 
  614         /*
  615          * If the delay line was empty call transmit_event() now.
  616          * Otherwise, the scheduler will take care of it.
  617          */
  618         if (p_was_empty)
  619                 transmit_event(p, head, tail);
  620 }
  621 
  622 /*
  623  * Called when we can transmit packets on WF2Q queues. Take pkts out of
  624  * the queues at their start time, and enqueue into the delay line.
  625  * Packets are drained until p->numbytes < 0. As long as
  626  * len_scaled >= p->numbytes, the packet goes into the delay line
  627  * with a deadline p->delay. For the last packet, if p->numbytes < 0,
  628  * there is an additional delay.
  629  */
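/*
 * Editor's worked example (hypothetical weights): two permanently
 * backlogged flows with weights 1 and 3 on the same pipe advance their
 * finish times F by len/1 and len/3 respectively, so the scheduler,
 * always serving the smallest eligible F, gives them bandwidth in a
 * 1:3 ratio.
 */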
  630 static void
  631 ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
  632 {
  633         int p_was_empty = (p->head == NULL);
  634         struct dn_heap *sch = &(p->scheduler_heap);
  635         struct dn_heap *neh = &(p->not_eligible_heap);
  636         int64_t p_numbytes = p->numbytes;
  637 
  638         DUMMYNET_LOCK_ASSERT();
  639 
  640         if (p->if_name[0] == 0)         /* tx clock is simulated */
  641                 /*
  642                  * Since result may not fit into p->numbytes (32bit) we
  643                  * are using 64bit var here.
  644                  */
  645                 p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
  646         else {  /*
  647                  * tx clock is for real,
  648                  * the ifq must be empty or this is a NOP.
  649                  */
  650                 if (p->ifp && p->ifp->if_snd.ifq_head != NULL)
  651                         return;
  652                 else {
  653                         DPRINTF(("dummynet: pipe %d ready from %s --\n",
  654                             p->pipe_nr, p->if_name));
  655                 }
  656         }
  657 
  658         /*
  659          * While we have backlogged traffic AND credit, we need to do
  660          * something on the queue.
  661          */
  662         while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
  663                 if (sch->elements > 0) {
  664                         /* Have some eligible pkts to send out. */
  665                         struct dn_flow_queue *q = sch->p[0].object;
  666                         struct mbuf *pkt = q->head;
  667                         struct dn_flow_set *fs = q->fs;
  668                         uint64_t len = pkt->m_pkthdr.len;
  669                         int len_scaled = p->bandwidth ? len * 8 * hz : 0;
  670 
  671                         heap_extract(sch, NULL); /* Remove queue from heap. */
  672                         p_numbytes -= len_scaled;
  673                         move_pkt(pkt, q, p, len);
  674 
  675                         p->V += (len << MY_M) / p->sum; /* Update V. */
  676                         q->S = q->F;                    /* Update start time. */
  677                         if (q->len == 0) {
  678                                 /* Flow not backlogged any more. */
  679                                 fs->backlogged--;
  680                                 heap_insert(&(p->idle_heap), q->F, q);
  681                         } else {
  682                                 /* Still backlogged. */
  683 
  684                                 /*
  685                                  * Update F and position in backlogged queue,
  686                                  * then put flow in not_eligible_heap
  687                                  * (we will fix this later).
  688                                  */
  689                                 len = (q->head)->m_pkthdr.len;
  690                                 q->F += (len << MY_M) / (uint64_t)fs->weight;
  691                                 if (DN_KEY_LEQ(q->S, p->V))
  692                                         heap_insert(neh, q->S, q);
  693                                 else
  694                                         heap_insert(sch, q->F, q);
  695                         }
  696                 }
  697                 /*
  698                  * Now compute V = max(V, min(S_i)). Remember that all elements
  699                  * in sch have by definition S_i <= V so if sch is not empty,
  700                  * V is surely the max and we must not update it. Conversely,
  701                  * if sch is empty we only need to look at neh.
  702                  */
  703                 if (sch->elements == 0 && neh->elements > 0)
  704                         p->V = MAX64(p->V, neh->p[0].key);
  705                 /* Move from neh to sch any packets that have become eligible */
  706                 while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
  707                         struct dn_flow_queue *q = neh->p[0].object;
  708                         heap_extract(neh, NULL);
  709                         heap_insert(sch, q->F, q);
  710                 }
  711 
  712                 if (p->if_name[0] != '\0') { /* Tx clock is from a real thing */
  713                         p_numbytes = -1;        /* Mark not ready for I/O. */
  714                         break;
  715                 }
  716         }
  717         if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0 &&
  718             p->idle_heap.elements > 0) {
  719                 /*
  720                  * No traffic and no events scheduled.
  721                  * We can get rid of idle-heap.
  722                  */
  723                 int i;
  724 
  725                 for (i = 0; i < p->idle_heap.elements; i++) {
  726                         struct dn_flow_queue *q = p->idle_heap.p[i].object;
  727 
  728                         q->F = 0;
  729                         q->S = q->F + 1;
  730                 }
  731                 p->sum = 0;
  732                 p->V = 0;
  733                 p->idle_heap.elements = 0;
  734         }
  735         /*
  736          * If we are getting clocks from dummynet (not a real interface) and
  737          * If we are under credit, schedule the next ready event.
  738          * Also fix the delivery time of the last packet.
  739          */
  740         if (p->if_name[0]==0 && p_numbytes < 0) { /* This implies bw > 0. */
  741                 dn_key t = 0;           /* Number of ticks i have to wait. */
  742 
  743                 if (p->bandwidth > 0)
  744                         t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
  745                 dn_tag_get(p->tail)->output_time += t;
  746                 p->sched_time = curr_time;
  747                 heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
  748                 /*
  749                  * XXX Should check errors on heap_insert, and drain the whole
  750                  * queue on error hoping next time we are luckier.
  751                  */
  752         }
  753 
  754         /* Fit (adjust if necessary) 64bit result into 32bit variable. */
  755         if (p_numbytes > INT_MAX)
  756                 p->numbytes = INT_MAX;
  757         else if (p_numbytes < INT_MIN)
  758                 p->numbytes = INT_MIN;
  759         else
  760                 p->numbytes = p_numbytes;
  761 
  762         /*
  763          * If the delay line was empty call transmit_event() now.
  764          * Otherwise, the scheduler will take care of it.
  765          */
  766         if (p_was_empty)
  767                 transmit_event(p, head, tail);
  768 }
  769 
  770 /*
   771  * This is called one tick after the previous run. It is used to
   772  * schedule the next run.
  773  */
  774 static void
  775 dummynet(void * __unused unused)
  776 {
  777 
  778         taskqueue_enqueue(dn_tq, &dn_task);
  779 }
  780 
  781 /*
  782  * The main dummynet processing function.
  783  */
  784 static void
  785 dummynet_task(void *context, int pending)
  786 {
  787         struct mbuf *head = NULL, *tail = NULL;
  788         struct dn_pipe *pipe;
  789         struct dn_heap *heaps[3];
  790         struct dn_heap *h;
  791         void *p;        /* generic parameter to handler */
  792         int i;
  793 
  794         DUMMYNET_LOCK();
  795 
  796         heaps[0] = &ready_heap;                 /* fixed-rate queues */
  797         heaps[1] = &wfq_ready_heap;             /* wfq queues */
  798         heaps[2] = &extract_heap;               /* delay line */
  799 
  800         /* Update number of lost(coalesced) ticks. */
  801         tick_lost += pending - 1;
  802  
  803         getmicrouptime(&t);
  804         /* Last tick duration (usec). */
  805         tick_last = (t.tv_sec - prev_t.tv_sec) * 1000000 +
  806             (t.tv_usec - prev_t.tv_usec);
  807         /* Last tick vs standard tick difference (usec). */
  808         tick_delta = (tick_last * hz - 1000000) / hz;
  809         /* Accumulated tick difference (usec). */
  810         tick_delta_sum += tick_delta;
  811  
  812         prev_t = t;
  813  
  814         /*
  815          * Adjust curr_time if accumulated tick difference greater than
  816          * 'standard' tick. Since curr_time should be monotonically increasing,
  817          * we do positive adjustment as required and throttle curr_time in
  818          * case of negative adjustment.
  819          */
  820         curr_time++;
  821         if (tick_delta_sum - tick >= 0) {
  822                 int diff = tick_delta_sum / tick;
  823  
  824                 curr_time += diff;
  825                 tick_diff += diff;
  826                 tick_delta_sum %= tick;
  827                 tick_adjustment++;
  828         } else if (tick_delta_sum + tick <= 0) {
  829                 curr_time--;
  830                 tick_diff--;
  831                 tick_delta_sum += tick;
  832                 tick_adjustment++;
  833         }
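        /*
         * Editor's worked example (hypothetical timings): at hz = 1000 the
         * standard tick is 1000 us.  If ten consecutive ticks last 1100 us
         * each, tick_delta is 100 us per tick; when tick_delta_sum reaches
         * 1000 us the branch above adds one extra tick to curr_time and
         * keeps the remainder, so simulation time tracks wall-clock time.
         */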
  834 
  835         for (i = 0; i < 3; i++) {
  836                 h = heaps[i];
  837                 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
  838                         if (h->p[0].key > curr_time)
  839                                 printf("dummynet: warning, "
  840                                     "heap %d is %d ticks late\n",
  841                                     i, (int)(curr_time - h->p[0].key));
  842                         /* store a copy before heap_extract */
  843                         p = h->p[0].object;
  844                         /* need to extract before processing */
  845                         heap_extract(h, NULL);
  846                         if (i == 0)
  847                                 ready_event(p, &head, &tail);
  848                         else if (i == 1) {
  849                                 struct dn_pipe *pipe = p;
  850                                 if (pipe->if_name[0] != '\0')
  851                                         printf("dummynet: bad ready_event_wfq "
  852                                             "for pipe %s\n", pipe->if_name);
  853                                 else
  854                                         ready_event_wfq(p, &head, &tail);
  855                         } else
  856                                 transmit_event(p, &head, &tail);
  857                 }
  858         }
  859 
  860         /* Sweep pipes trying to expire idle flow_queues. */
  861         for (i = 0; i < HASHSIZE; i++)
  862                 SLIST_FOREACH(pipe, &pipehash[i], next)
  863                         if (pipe->idle_heap.elements > 0 &&
  864                             DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
  865                                 struct dn_flow_queue *q =
  866                                     pipe->idle_heap.p[0].object;
  867 
  868                                 heap_extract(&(pipe->idle_heap), NULL);
  869                                 /* Mark timestamp as invalid. */
  870                                 q->S = q->F + 1;
  871                                 pipe->sum -= q->fs->weight;
  872                         }
  873 
  874         DUMMYNET_UNLOCK();
  875 
  876         if (head != NULL)
  877                 dummynet_send(head);
  878 
  879         callout_reset(&dn_timeout, 1, dummynet, NULL);
  880 }
  881 
  882 static void
  883 dummynet_send(struct mbuf *m)
  884 {
  885         struct dn_pkt_tag *pkt;
  886         struct mbuf *n;
  887         struct ip *ip;
  888 
  889         for (; m != NULL; m = n) {
  890                 n = m->m_nextpkt;
  891                 m->m_nextpkt = NULL;
  892                 pkt = dn_tag_get(m);
  893                 switch (pkt->dn_dir) {
  894                 case DN_TO_IP_OUT:
  895                         ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
  896                         break ;
  897                 case DN_TO_IP_IN :
  898                         ip = mtod(m, struct ip *);
  899                         ip->ip_len = htons(ip->ip_len);
  900                         ip->ip_off = htons(ip->ip_off);
  901                         netisr_dispatch(NETISR_IP, m);
  902                         break;
  903 #ifdef INET6
  904                 case DN_TO_IP6_IN:
  905                         netisr_dispatch(NETISR_IPV6, m);
  906                         break;
  907 
  908                 case DN_TO_IP6_OUT:
  909                         ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
  910                         break;
  911 #endif
  912                 case DN_TO_IFB_FWD:
  913                         if (bridge_dn_p != NULL)
  914                                 ((*bridge_dn_p)(m, pkt->ifp));
  915                         else
  916                                 printf("dummynet: if_bridge not loaded\n");
  917 
  918                         break;
  919                 case DN_TO_ETH_DEMUX:
  920                         /*
  921                          * The Ethernet code assumes the Ethernet header is
  922                          * contiguous in the first mbuf header.
   923                          * Ensure this is true.
  924                          */
  925                         if (m->m_len < ETHER_HDR_LEN &&
  926                             (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
  927                                 printf("dummynet/ether: pullup failed, "
  928                                     "dropping packet\n");
  929                                 break;
  930                         }
  931                         ether_demux(m->m_pkthdr.rcvif, m);
  932                         break;
  933                 case DN_TO_ETH_OUT:
  934                         ether_output_frame(pkt->ifp, m);
  935                         break;
  936                 default:
  937                         printf("dummynet: bad switch %d!\n", pkt->dn_dir);
  938                         m_freem(m);
  939                         break;
  940                 }
  941         }
  942 }
  943 
  944 /*
  945  * Unconditionally expire empty queues in case of shortage.
  946  * Returns the number of queues freed.
  947  */
  948 static int
  949 expire_queues(struct dn_flow_set *fs)
  950 {
  951     struct dn_flow_queue *q, *prev ;
  952     int i, initial_elements = fs->rq_elements ;
  953 
  954     if (fs->last_expired == time_uptime)
  955         return 0 ;
  956     fs->last_expired = time_uptime ;
  957     for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
  958         for (prev=NULL, q = fs->rq[i] ; q != NULL ; )
  959             if (q->head != NULL || q->S != q->F+1) {
  960                 prev = q ;
  961                 q = q->next ;
  962             } else { /* entry is idle, expire it */
  963                 struct dn_flow_queue *old_q = q ;
  964 
  965                 if (prev != NULL)
  966                     prev->next = q = q->next ;
  967                 else
  968                     fs->rq[i] = q = q->next ;
  969                 fs->rq_elements-- ;
  970                 free(old_q, M_DUMMYNET);
  971             }
  972     return initial_elements - fs->rq_elements ;
  973 }
  974 
  975 /*
  976  * If room, create a new queue and put at head of slot i;
  977  * otherwise, create or use the default queue.
  978  */
  979 static struct dn_flow_queue *
  980 create_queue(struct dn_flow_set *fs, int i)
  981 {
  982         struct dn_flow_queue *q;
  983 
  984         if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
  985             expire_queues(fs) == 0) {
  986                 /* No way to get room, use or create overflow queue. */
  987                 i = fs->rq_size;
  988                 if (fs->rq[i] != NULL)
  989                     return fs->rq[i];
  990         }
  991         q = malloc(sizeof(*q), M_DUMMYNET, M_NOWAIT | M_ZERO);
  992         if (q == NULL) {
  993                 printf("dummynet: sorry, cannot allocate queue for new flow\n");
  994                 return (NULL);
  995         }
  996         q->fs = fs;
  997         q->hash_slot = i;
  998         q->next = fs->rq[i];
  999         q->S = q->F + 1;        /* hack - mark timestamp as invalid. */
 1000         q->numbytes = io_fast ? fs->pipe->bandwidth : 0;
 1001         fs->rq[i] = q;
 1002         fs->rq_elements++;
 1003         return (q);
 1004 }
 1005 
 1006 /*
 1007  * Given a flow_set and a pkt in last_pkt, find a matching queue
 1008  * after appropriate masking. The queue is moved to front
 1009  * so that further searches take less time.
 1010  */
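#if 0
/*
 * Editor's sketch (never compiled, hypothetical values): a flow mask
 * that aggregates traffic into one queue per destination /24.  Every
 * other field of the flow id collapses to 0 under the mask before the
 * hash below is computed.
 */
static void
flow_mask_example(struct dn_flow_set *fs)
{
	bzero(&fs->flow_mask, sizeof(fs->flow_mask));
	fs->flow_mask.dst_ip = 0xffffff00;	/* one queue per dst /24 */
}
#endif /* editor's example */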
 1011 static struct dn_flow_queue *
 1012 find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
 1013 {
 1014     int i = 0 ; /* we need i and q for new allocations */
 1015     struct dn_flow_queue *q, *prev;
 1016     int is_v6 = IS_IP6_FLOW_ID(id);
 1017 
 1018     if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) )
 1019         q = fs->rq[0] ;
 1020     else {
 1021         /* first, do the masking, then hash */
 1022         id->dst_port &= fs->flow_mask.dst_port ;
 1023         id->src_port &= fs->flow_mask.src_port ;
 1024         id->proto &= fs->flow_mask.proto ;
 1025         id->flags = 0 ; /* we don't care about this one */
 1026         if (is_v6) {
 1027             APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
 1028             APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
 1029             id->flow_id6 &= fs->flow_mask.flow_id6;
 1030 
 1031             i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff)^
 1032                 ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff)^
 1033                 ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff)^
 1034                 ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff)^
 1035 
 1036                 ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff)^
 1037                 ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff)^
 1038                 ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff)^
 1039                 ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff)^
 1040 
 1041                 ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff)^
 1042                 ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff)^
 1043                 ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^
 1044                 ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^
 1045 
 1046                 ((id->src_ip6.__u6_addr.__u6_addr32[0] << 16) & 0xffff)^
 1047                 ((id->src_ip6.__u6_addr.__u6_addr32[1] << 16) & 0xffff)^
 1048                 ((id->src_ip6.__u6_addr.__u6_addr32[2] << 16) & 0xffff)^
 1049                 ((id->src_ip6.__u6_addr.__u6_addr32[3] << 16) & 0xffff)^
 1050 
 1051                 (id->dst_port << 1) ^ (id->src_port) ^
 1052                 (id->proto ) ^
 1053                 (id->flow_id6);
 1054         } else {
 1055             id->dst_ip &= fs->flow_mask.dst_ip ;
 1056             id->src_ip &= fs->flow_mask.src_ip ;
 1057 
 1058             i = ( (id->dst_ip) & 0xffff ) ^
 1059                 ( (id->dst_ip >> 15) & 0xffff ) ^
 1060                 ( (id->src_ip << 1) & 0xffff ) ^
 1061                 ( (id->src_ip >> 16 ) & 0xffff ) ^
 1062                 (id->dst_port << 1) ^ (id->src_port) ^
 1063                 (id->proto );
 1064         }
 1065         i = i % fs->rq_size ;
 1066         /* finally, scan the current list for a match */
 1067         searches++ ;
 1068         for (prev=NULL, q = fs->rq[i] ; q ; ) {
 1069             search_steps++;
 1070             if (is_v6 &&
 1071                     IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) &&  
 1072                     IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) &&  
 1073                     id->dst_port == q->id.dst_port &&
 1074                     id->src_port == q->id.src_port &&
 1075                     id->proto == q->id.proto &&
 1076                     id->flags == q->id.flags &&
 1077                     id->flow_id6 == q->id.flow_id6)
 1078                 break ; /* found */
 1079 
 1080             if (!is_v6 && id->dst_ip == q->id.dst_ip &&
 1081                     id->src_ip == q->id.src_ip &&
 1082                     id->dst_port == q->id.dst_port &&
 1083                     id->src_port == q->id.src_port &&
 1084                     id->proto == q->id.proto &&
 1085                     id->flags == q->id.flags)
 1086                 break ; /* found */
 1087 
 1088             /* No match. Check if we can expire the entry */
 1089             if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
 1090                 /* entry is idle and not in any heap, expire it */
 1091                 struct dn_flow_queue *old_q = q ;
 1092 
 1093                 if (prev != NULL)
 1094                     prev->next = q = q->next ;
 1095                 else
 1096                     fs->rq[i] = q = q->next ;
 1097                 fs->rq_elements-- ;
 1098                 free(old_q, M_DUMMYNET);
 1099                 continue ;
 1100             }
 1101             prev = q ;
 1102             q = q->next ;
 1103         }
 1104         if (q && prev != NULL) { /* found and not in front */
 1105             prev->next = q->next ;
 1106             q->next = fs->rq[i] ;
 1107             fs->rq[i] = q ;
 1108         }
 1109     }
 1110     if (q == NULL) { /* no match, need to allocate a new entry */
 1111         q = create_queue(fs, i);
 1112         if (q != NULL)
 1113         q->id = *id ;
 1114     }
 1115     return q ;
 1116 }
 1117 
 1118 static int
 1119 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
 1120 {
 1121         /*
 1122          * RED algorithm
 1123          *
 1124          * RED calculates the average queue size (avg) using a low-pass filter
 1125          * with an exponential weighted (w_q) moving average:
 1126          *      avg  <-  (1-w_q) * avg + w_q * q_size
  1127          * where q_size is the queue length (measured in bytes or packets).
 1128          *
 1129          * If q_size == 0, we compute the idle time for the link, and set
 1130          *      avg = (1 - w_q)^(idle/s)
 1131          * where s is the time needed for transmitting a medium-sized packet.
 1132          *
 1133          * Now, if avg < min_th the packet is enqueued.
 1134          * If avg > max_th the packet is dropped. Otherwise, the packet is
 1135          * dropped with probability P function of avg.
 1136          */
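        /*
         * Editor's worked example (hypothetical parameters): with
         * min_th = 5 and max_th = 15 packets and max_p = 0.1, an average
         * queue of 10 packets yields p_b = max_p * (avg - min_th) /
         * (max_th - min_th) = 0.05; the effective drop probability then
         * grows with q->count, the packets accepted since the last drop.
         */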
 1137 
 1138         int64_t p_b = 0;
 1139 
 1140         /* Queue in bytes or packets? */
 1141         u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ?
 1142             q->len_bytes : q->len;
 1143 
 1144         DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));
 1145 
 1146         /* Average queue size estimation. */
 1147         if (q_size != 0) {
 1148                 /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
 1149                 int diff = SCALE(q_size) - q->avg;
 1150                 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
 1151 
 1152                 q->avg += (int)v;
 1153         } else {
 1154                 /*
  1155                  * Queue is empty; compute how long the queue has been
  1156                  * empty and use a lookup table for computing
  1157                  * (1 - w_q)^(idle_time/s) where s is the time to send a
  1158                  * (small) packet.
 1159                  * XXX check wraps...
 1160                  */
 1161                 if (q->avg) {
 1162                         u_int t = ((uint32_t)curr_time - q->q_time) /
 1163                             fs->lookup_step;
 1164 
 1165                         q->avg = (t < fs->lookup_depth) ?
 1166                             SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
 1167                 }
 1168         }
 1169         DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
 1170 
  1171         /* Should we drop the packet? */
 1172         if (q->avg < fs->min_th) {
 1173                 q->count = -1;
 1174                 return (0);     /* accept packet */
 1175         }
 1176         if (q->avg >= fs->max_th) {     /* average queue >=  max threshold */
 1177                 if (fs->flags_fs & DN_IS_GENTLE_RED) {
 1178                         /*
 1179                          * According to Gentle-RED, if avg is greater than
 1180                          * max_th the packet is dropped with a probability
 1181                          *       p_b = c_3 * avg - c_4
 1182                          * where c_3 = (1 - max_p) / max_th
 1183                          *       c_4 = 1 - 2 * max_p
 1184                          */
 1185                         p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
 1186                             fs->c_4;
 1187                 } else {
 1188                         q->count = -1;
 1189                         DPRINTF(("dummynet: - drop"));
 1190                         return (1);
 1191                 }
 1192         } else if (q->avg > fs->min_th) {
 1193                 /*
 1194                  * We compute p_b using the linear dropping function
 1195                  *       p_b = c_1 * avg - c_2
 1196                  * where c_1 = max_p / (max_th - min_th)
 1197                  *       c_2 = max_p * min_th / (max_th - min_th)
 1198                  */
 1199                 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
 1200         }
 1201 
 1202         if (fs->flags_fs & DN_QSIZE_IS_BYTES)
 1203                 p_b = (p_b * len) / fs->max_pkt_size;
 1204         if (++q->count == 0)
 1205                 q->random = random() & 0xffff;
 1206         else {
 1207                 /*
 1208                  * q->count counts packets arrived since last drop, so a greater
 1209                  * value of q->count means a greater packet drop probability.
 1210                  */
 1211                 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
 1212                         q->count = 0;
 1213                         DPRINTF(("dummynet: - red drop"));
 1214                         /* After a drop we calculate a new random value. */
 1215                         q->random = random() & 0xffff;
 1216                         return (1);     /* drop */
 1217                 }
 1218         }
 1219         /* End of RED algorithm. */
 1220 
 1221         return (0);     /* accept */
 1222 }
 1223 
 1224 static __inline struct dn_flow_set *
 1225 locate_flowset(int fs_nr)
 1226 {
 1227         struct dn_flow_set *fs;
 1228 
 1229         SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
 1230                 if (fs->fs_nr == fs_nr)
 1231                         return (fs);
 1232 
 1233         return (NULL);
 1234 }
 1235 
 1236 static __inline struct dn_pipe *
 1237 locate_pipe(int pipe_nr)
 1238 {
 1239         struct dn_pipe *pipe;
 1240 
 1241         SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next)
 1242                 if (pipe->pipe_nr == pipe_nr)
 1243                         return (pipe);
 1244 
 1245         return (NULL);
 1246 }
 1247 
 1248 /*
 1249  * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 1250  * depending on whether WF2Q or fixed bw is used.
 1251  *
 1252  * pipe_nr      pipe or queue the packet is destined for.
 1253  * dir          where shall we send the packet after dummynet.
 1254  * m            the mbuf with the packet
 1255  * ifp          the 'ifp' parameter from the caller.
  1256  *              NULL in ip_input, destination interface in ip_output.
 1257  * rule         matching rule, in case of multiple passes
 1258  */
 1259 static int
 1260 dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
 1261 {
 1262         struct mbuf *m = *m0, *head = NULL, *tail = NULL;
 1263         struct dn_pkt_tag *pkt;
 1264         struct m_tag *mtag;
 1265         struct dn_flow_set *fs = NULL;
 1266         struct dn_pipe *pipe;
 1267         uint64_t len = m->m_pkthdr.len;
 1268         struct dn_flow_queue *q = NULL;
 1269         int is_pipe;
 1270         ipfw_insn *cmd = ACTION_PTR(fwa->rule);
 1271 
 1272         KASSERT(m->m_nextpkt == NULL,
 1273             ("dummynet_io: mbuf queue passed to dummynet"));
 1274 
 1275         if (cmd->opcode == O_LOG)
 1276                 cmd += F_LEN(cmd);
 1277         if (cmd->opcode == O_ALTQ)
 1278                 cmd += F_LEN(cmd);
 1279         if (cmd->opcode == O_TAG)
 1280                 cmd += F_LEN(cmd);
 1281         is_pipe = (cmd->opcode == O_PIPE);
 1282 
 1283         DUMMYNET_LOCK();
 1284         io_pkt++;
 1285         /*
 1286          * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
 1287          *
 1288          * XXXGL: probably the pipe->fs and fs->pipe logic here
 1289          * below can be simplified.
 1290          */
 1291         if (is_pipe) {
 1292                 pipe = locate_pipe(fwa->cookie);
 1293                 if (pipe != NULL)
 1294                         fs = &(pipe->fs);
 1295         } else
 1296                 fs = locate_flowset(fwa->cookie);
 1297 
 1298         if (fs == NULL)
 1299                 goto dropit;    /* This queue/pipe does not exist! */
 1300         pipe = fs->pipe;
 1301         if (pipe == NULL) {     /* Must be a queue, try find a matching pipe. */
 1302                 pipe = locate_pipe(fs->parent_nr);
 1303                 if (pipe != NULL)
 1304                         fs->pipe = pipe;
 1305                 else {
 1306                         printf("dummynet: no pipe %d for queue %d, drop pkt\n",
 1307                             fs->parent_nr, fs->fs_nr);
 1308                         goto dropit;
 1309                 }
 1310         }
 1311         q = find_queue(fs, &(fwa->f_id));
 1312         if (q == NULL)
 1313                 goto dropit;            /* Cannot allocate queue. */
 1314 
 1315         /* Update statistics, then check reasons to drop pkt. */
 1316         q->tot_bytes += len;
 1317         q->tot_pkts++;
 1318         if (fs->plr && random() < fs->plr)
 1319                 goto dropit;            /* Random pkt drop. */
 1320         if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
 1321                 if (q->len_bytes > fs->qsize)
 1322                         goto dropit;    /* Queue size overflow. */
 1323         } else {
 1324                 if (q->len >= fs->qsize)
 1325                         goto dropit;    /* Queue count overflow. */
 1326         }
 1327         if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len))
 1328                 goto dropit;
 1329 
 1330         /* XXX expensive to zero, see if we can remove it. */
 1331         mtag = m_tag_get(PACKET_TAG_DUMMYNET,
 1332             sizeof(struct dn_pkt_tag), M_NOWAIT | M_ZERO);
 1333         if (mtag == NULL)
 1334                 goto dropit;            /* Cannot allocate packet header. */
 1335         m_tag_prepend(m, mtag);         /* Attach to mbuf chain. */
 1336 
 1337         pkt = (struct dn_pkt_tag *)(mtag + 1);
 1338         /*
 1339          * OK, we can handle the packet now:
 1340          * build and enqueue the packet together with its parameters.
 1341          */
 1342         pkt->rule = fwa->rule;
 1343         pkt->dn_dir = dir;
 1344 
 1345         pkt->ifp = fwa->oif;
 1346 
 1347         if (q->head == NULL)
 1348                 q->head = m;
 1349         else
 1350                 q->tail->m_nextpkt = m;
 1351         q->tail = m;
 1352         q->len++;
 1353         q->len_bytes += len;
 1354 
 1355         if (q->head != m)               /* Flow was not idle, we are done. */
 1356                 goto done;
 1357 
 1358         if (q->q_time < (uint32_t)curr_time)
 1359                 q->numbytes = io_fast ? fs->pipe->bandwidth : 0;
 1360         q->q_time = curr_time;
 1361 
 1362         /*
 1363          * If we reach this point the flow was previously idle, so we need
 1364          * to schedule it. This involves different actions for fixed-rate or
 1365          * WF2Q queues.
 1366          */
 1367         if (is_pipe) {
 1368                 /* Fixed-rate queue: just insert into the ready_heap. */
 1369                 dn_key t = 0;
 1370 
 1371                 if (pipe->bandwidth && m->m_pkthdr.len * 8 * hz > q->numbytes)
 1372                         t = SET_TICKS(m, q, pipe);
 1373                 q->sched_time = curr_time;
 1374                 if (t == 0)             /* Must process it now. */
 1375                         ready_event(q, &head, &tail);
 1376                 else
 1377                         heap_insert(&ready_heap, curr_time + t, q);
 1378         } else {
 1379                 /*
 1380                  * WF2Q. First, compute start time S: if the flow was
 1381                  * idle (S = F + 1) set S to the virtual time V for the
 1382                  * controlling pipe, and update the sum of weights for the pipe;
 1383                  * otherwise, remove flow from idle_heap and set S to max(F,V).
 1384                  * Second, compute finish time F = S + len / weight.
 1385                  * Third, if pipe was idle, update V = max(S, V).
 1386                  * Fourth, count one more backlogged flow.
 1387                  */
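                      /*
                       * Worked example (illustrative): a len = 1000 byte
                       * packet on a queue with weight 50 gets
                       * F = S + (1000 << MY_M) / 50; doubling the weight
                       * halves that increment, so heavier queues receive a
                       * proportionally larger share of the pipe.
                       */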
 1388                 if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid. */
 1389                         q->S = pipe->V;
 1390                         pipe->sum += fs->weight; /* Add weight of new queue. */
 1391                 } else {
 1392                         heap_extract(&(pipe->idle_heap), q);
 1393                         q->S = MAX64(q->F, pipe->V);
 1394                 }
 1395                 q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
 1396 
 1397                 if (pipe->not_eligible_heap.elements == 0 &&
 1398                     pipe->scheduler_heap.elements == 0)
 1399                         pipe->V = MAX64(q->S, pipe->V);
 1400                 fs->backlogged++;
 1401                 /*
 1402                  * Look at eligibility. A flow is not eligible if S > V (when
 1403                  * this happens, it means that there is some other flow already
 1404                  * scheduled for the same pipe, so the scheduler_heap cannot be
 1405                  * empty). If the flow is not eligible, we just store it in the
 1406                  * not_eligible_heap. Otherwise, we store it in the scheduler_heap
 1407                  * and possibly invoke ready_event_wfq() right now if there is
 1408                  * leftover credit.
 1409                  * Note that for all flows in scheduler_heap (SCH), S_i <= V,
 1410                  * and for all flows in not_eligible_heap (NEH), S_i > V.
 1411                  * So when we need to compute max(V, min(S_i)) for all i in
 1412                  * SCH+NEH, we only need to look into NEH.
 1413                  */
 1414                 if (DN_KEY_GT(q->S, pipe->V)) {         /* Not eligible. */
 1415                         if (pipe->scheduler_heap.elements == 0)
 1416                                 printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
 1417                         heap_insert(&(pipe->not_eligible_heap), q->S, q);
 1418                 } else {
 1419                         heap_insert(&(pipe->scheduler_heap), q->F, q);
 1420                         if (pipe->numbytes >= 0) {       /* Pipe is idle. */
 1421                                 if (pipe->scheduler_heap.elements != 1)
 1422                                         printf("dummynet: OUCH! pipe should have been idle!\n");
 1423                                 DPRINTF(("dummynet: waking up pipe %d at %d\n",
 1424                                     pipe->pipe_nr, (int)(q->F >> MY_M)));
 1425                                 pipe->sched_time = curr_time;
 1426                                 ready_event_wfq(pipe, &head, &tail);
 1427                         }
 1428                 }
 1429         }
 1430 done:
 1431         if (head == m && dir != DN_TO_IFB_FWD && dir != DN_TO_ETH_DEMUX &&
 1432             dir != DN_TO_ETH_OUT) {     /* Fast io. */
 1433                 io_pkt_fast++;
 1434                 if (m->m_nextpkt != NULL)
 1435                         printf("dummynet: fast io: pkt chain detected!\n");
 1436                 head = m->m_nextpkt = NULL;
 1437         } else
 1438                 *m0 = NULL;             /* Normal io. */
 1439 
 1440         DUMMYNET_UNLOCK();
 1441         if (head != NULL)
 1442                 dummynet_send(head);
 1443         return (0);
 1444 
 1445 dropit:
 1446         io_pkt_drop++;
 1447         if (q)
 1448                 q->drops++;
 1449         DUMMYNET_UNLOCK();
 1450         m_freem(m);
 1451         *m0 = NULL;
 1452         return ((fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
 1453 }
 1454 
 1455 /*
 1456  * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT).
 1457  * Doing this would probably save us the initial bzero of dn_pkt.
 1458  */
 1459 #define DN_FREE_PKT(_m) do {                            \
 1460         m_freem(_m);                                    \
 1461 } while (0)
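      /*
       * DN_FREE_PKT currently reduces to a plain m_freem(), but it is kept
       * as the single place to hang extra per-packet cleanup (such as the
       * rt_unref discussed above) should that become necessary again.
       */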
 1462 
 1463 /*
 1464  * Dispose all packets and flow_queues on a flow_set.
 1465  * If all=1, also remove the RED lookup table and other storage,
 1466  * including the descriptor itself.
 1467  * For the flow_set embedded in a dn_pipe, the caller MUST also clean up ready_heap.
 1468  */
 1469 static void
 1470 purge_flow_set(struct dn_flow_set *fs, int all)
 1471 {
 1472         struct dn_flow_queue *q, *qn;
 1473         int i;
 1474 
 1475         DUMMYNET_LOCK_ASSERT();
 1476 
 1477         for (i = 0; i <= fs->rq_size; i++) {
 1478                 for (q = fs->rq[i]; q != NULL; q = qn) {
 1479                         struct mbuf *m, *mnext;
 1480 
 1481                         mnext = q->head;
 1482                         while ((m = mnext) != NULL) {
 1483                                 mnext = m->m_nextpkt;
 1484                                 DN_FREE_PKT(m);
 1485                         }
 1486                         qn = q->next;
 1487                         free(q, M_DUMMYNET);
 1488                 }
 1489                 fs->rq[i] = NULL;
 1490         }
 1491 
 1492         fs->rq_elements = 0;
 1493         if (all) {
 1494                 /* RED - free lookup table. */
 1495                 if (fs->w_q_lookup != NULL)
 1496                         free(fs->w_q_lookup, M_DUMMYNET);
 1497                 if (fs->rq != NULL)
 1498                         free(fs->rq, M_DUMMYNET);
 1499                 /* If this fs is not part of a pipe, free it. */
 1500                 if (fs->pipe == NULL || fs != &(fs->pipe->fs))
 1501                         free(fs, M_DUMMYNET);
 1502         }
 1503 }
 1504 
 1505 /*
 1506  * Dispose all packets queued on a pipe (not a flow_set).
 1507  * Also free all resources associated with a pipe that is about
 1508  * to be deleted.
 1509  */
 1510 static void
 1511 purge_pipe(struct dn_pipe *pipe)
 1512 {
 1513     struct mbuf *m, *mnext;
 1514 
 1515     purge_flow_set(&(pipe->fs), 1);
 1516 
 1517     mnext = pipe->head;
 1518     while ((m = mnext) != NULL) {
 1519         mnext = m->m_nextpkt;
 1520         DN_FREE_PKT(m);
 1521     }
 1522 
 1523     heap_free(&(pipe->scheduler_heap));
 1524     heap_free(&(pipe->not_eligible_heap));
 1525     heap_free(&(pipe->idle_heap));
 1526 }
 1527 
 1528 /*
 1529  * Delete all pipes and heaps, returning their memory. Must also
 1530  * remove references from all ipfw rules to all pipes.
 1531  */
 1532 static void
 1533 dummynet_flush(void)
 1534 {
 1535         struct dn_pipe *pipe, *pipe1;
 1536         struct dn_flow_set *fs, *fs1;
 1537         int i;
 1538 
 1539         DUMMYNET_LOCK();
 1540         /* Free heaps so we don't have unwanted events. */
 1541         heap_free(&ready_heap);
 1542         heap_free(&wfq_ready_heap);
 1543         heap_free(&extract_heap);
 1544 
 1545         /*
 1546          * Now purge all queued pkts and delete all pipes.
 1547          *
 1548          * XXXGL: can we merge the for(;;) cycles into one or not?
 1549          */
 1550         for (i = 0; i < HASHSIZE; i++)
 1551                 SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
 1552                         SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
 1553                         purge_flow_set(fs, 1);
 1554                 }
 1555         for (i = 0; i < HASHSIZE; i++)
 1556                 SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
 1557                         SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
 1558                         purge_pipe(pipe);
 1559                         free(pipe, M_DUMMYNET);
 1560                 }
 1561         DUMMYNET_UNLOCK();
 1562 }
 1563 
 1564 extern struct ip_fw *ip_fw_default_rule;
 1565 static void
 1566 dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
 1567 {
 1568     int i;
 1569     struct dn_flow_queue *q;
 1570     struct mbuf *m;
 1571 
 1572     for (i = 0; i <= fs->rq_size; i++) /* the last one is the overflow queue */
 1573         for (q = fs->rq[i]; q; q = q->next)
 1574             for (m = q->head; m; m = m->m_nextpkt) {
 1575                 struct dn_pkt_tag *pkt = dn_tag_get(m);
 1576                 if (pkt->rule == r)
 1577                     pkt->rule = ip_fw_default_rule;
 1578             }
 1579 }
 1580 /*
 1581  * When a firewall rule is deleted, scan all queues and replace the stale
 1582  * rule pointer in packets tagged with that rule by the default rule.
 1583  */
 1584 void
 1585 dn_rule_delete(void *r)
 1586 {
 1587     struct dn_pipe *pipe;
 1588     struct dn_flow_set *fs;
 1589     struct dn_pkt_tag *pkt;
 1590     struct mbuf *m;
 1591     int i;
 1592 
 1593     DUMMYNET_LOCK();
 1594     /*
 1595      * If the rule references a queue (dn_flow_set), then scan
 1596      * the flow set; otherwise scan pipes. Doing either would suffice,
 1597      * but doing both does no harm.
 1598      */
 1599     for (i = 0; i < HASHSIZE; i++)
 1600         SLIST_FOREACH(fs, &flowsethash[i], next)
 1601                 dn_rule_delete_fs(fs, r);
 1602 
 1603     for (i = 0; i < HASHSIZE; i++)
 1604         SLIST_FOREACH(pipe, &pipehash[i], next) {
 1605                 fs = &(pipe->fs);
 1606                 dn_rule_delete_fs(fs, r);
 1607                 for (m = pipe->head; m; m = m->m_nextpkt) {
 1608                         pkt = dn_tag_get(m);
 1609                         if (pkt->rule == r)
 1610                                 pkt->rule = ip_fw_default_rule;
 1611                 }
 1612         }
 1613     DUMMYNET_UNLOCK();
 1614 }
 1615 
 1616 /*
 1617  * Set up RED parameters.
 1618  */
 1619 static int
 1620 config_red(struct dn_flow_set *p, struct dn_flow_set *x)
 1621 {
 1622         int i;
 1623 
 1624         x->w_q = p->w_q;
 1625         x->min_th = SCALE(p->min_th);
 1626         x->max_th = SCALE(p->max_th);
 1627         x->max_p = p->max_p;
 1628 
 1629         x->c_1 = p->max_p / (p->max_th - p->min_th);
 1630         x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
 1631 
 1632         if (x->flags_fs & DN_IS_GENTLE_RED) {
 1633                 x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
 1634                 x->c_4 = SCALE(1) - 2 * p->max_p;
 1635         }
 1636 
 1637         /* If the lookup table already exists, free it and create it again. */
 1638         if (x->w_q_lookup) {
 1639                 free(x->w_q_lookup, M_DUMMYNET);
 1640                 x->w_q_lookup = NULL;
 1641         }
 1642         if (red_lookup_depth == 0) {
 1643                 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
 1644                     " must be > 0\n");
 1645                 free(x, M_DUMMYNET);
 1646                 return (EINVAL);
 1647         }
 1648         x->lookup_depth = red_lookup_depth;
 1649         x->w_q_lookup = (u_int *)malloc(x->lookup_depth * sizeof(u_int),
 1650             M_DUMMYNET, M_NOWAIT);
 1651         if (x->w_q_lookup == NULL) {
 1652                 printf("dummynet: sorry, cannot allocate red lookup table\n");
 1653                 free(x, M_DUMMYNET);
 1654                 return (ENOSPC);
 1655         }
 1656 
 1657         /* Fill the lookup table with (1 - w_q)^x */
 1658         x->lookup_step = p->lookup_step;
 1659         x->lookup_weight = p->lookup_weight;
 1660         x->w_q_lookup[0] = SCALE(1) - x->w_q;
 1661 
 1662         for (i = 1; i < x->lookup_depth; i++)
 1663                 x->w_q_lookup[i] =
 1664                     SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
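              /*
               * Sanity note on the recurrence above, assuming (as the
               * user-level configuration code arranges) that lookup_weight
               * encodes (1 - w_q)^lookup_step in fixed point: entry i then
               * approximates (1 - w_q)^(1 + i * lookup_step), which lets the
               * RED code decay the average queue length across idle periods
               * without floating point.
               */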
 1665 
 1666         if (red_avg_pkt_size < 1)
 1667                 red_avg_pkt_size = 512;
 1668         x->avg_pkt_size = red_avg_pkt_size;
 1669         if (red_max_pkt_size < 1)
 1670                 red_max_pkt_size = 1500;
 1671         x->max_pkt_size = red_max_pkt_size;
 1672         return (0);
 1673 }
 1674 
 1675 static int
 1676 alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
 1677 {
 1678     if (x->flags_fs & DN_HAVE_FLOW_MASK) {     /* allocate some slots */
 1679         int l = pfs->rq_size;
 1680 
 1681         if (l == 0)
 1682             l = dn_hash_size;
 1683         if (l < 4)
 1684             l = 4;
 1685         else if (l > DN_MAX_HASH_SIZE)
 1686             l = DN_MAX_HASH_SIZE;
 1687         x->rq_size = l;
 1688     } else                  /* one is enough for null mask */
 1689         x->rq_size = 1;
 1690     x->rq = malloc((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
 1691             M_DUMMYNET, M_NOWAIT | M_ZERO);
 1692     if (x->rq == NULL) {
 1693         printf("dummynet: sorry, cannot allocate queue\n");
 1694         return (ENOMEM);
 1695     }
 1696     x->rq_elements = 0;
 1697     return (0);
 1698 }
 1699 
 1700 static void
 1701 set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
 1702 {
 1703         x->flags_fs = src->flags_fs;
 1704         x->qsize = src->qsize;
 1705         x->plr = src->plr;
 1706         x->flow_mask = src->flow_mask;
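              /*
               * Clamp the queue size below.  Note that an oversize value is
               * reset to a built-in default (1MB or 50 slots) rather than to
               * the sysctl limit it was checked against.
               */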
 1707         if (x->flags_fs & DN_QSIZE_IS_BYTES) {
 1708                 if (x->qsize > pipe_byte_limit)
 1709                         x->qsize = 1024 * 1024;
 1710         } else {
 1711                 if (x->qsize == 0)
 1712                         x->qsize = 50;
 1713                 if (x->qsize > pipe_slot_limit)
 1714                         x->qsize = 50;
 1715         }
 1716         /* Configuring RED. */
 1717         if (x->flags_fs & DN_IS_RED)
 1718                 config_red(src, x);     /* XXX should check errors */
 1719 }
 1720 
 1721 /*
 1722  * Setup pipe or queue parameters.
 1723  */
 1724 static int
 1725 config_pipe(struct dn_pipe *p)
 1726 {
 1727         struct dn_flow_set *pfs = &(p->fs);
 1728         struct dn_flow_queue *q;
 1729         int i, error;
 1730 
 1731         /*
 1732          * The config program passes parameters as follows:
 1733          * bw = bits/second (0 means no limits),
 1734          * delay = ms, must be translated into ticks.
 1735          * qsize = slots/bytes
 1736          */
 1737         p->delay = (p->delay * hz) / 1000;
 1738         /* We need either a pipe number or a flow_set number. */
 1739         if (p->pipe_nr == 0 && pfs->fs_nr == 0)
 1740                 return (EINVAL);
 1741         if (p->pipe_nr != 0 && pfs->fs_nr != 0)
 1742                 return (EINVAL);
 1743         if (p->pipe_nr != 0) {                  /* this is a pipe */
 1744                 struct dn_pipe *pipe;
 1745 
 1746                 DUMMYNET_LOCK();
 1747                 pipe = locate_pipe(p->pipe_nr); /* locate pipe */
 1748 
 1749                 if (pipe == NULL) {             /* new pipe */
 1750                         pipe = malloc(sizeof(struct dn_pipe), M_DUMMYNET,
 1751                             M_NOWAIT | M_ZERO);
 1752                         if (pipe == NULL) {
 1753                                 DUMMYNET_UNLOCK();
 1754                                 printf("dummynet: no memory for new pipe\n");
 1755                                 return (ENOMEM);
 1756                         }
 1757                         pipe->pipe_nr = p->pipe_nr;
 1758                         pipe->fs.pipe = pipe;
 1759                         /*
 1760                          * idle_heap is the only one from which
 1761                          * we extract from the middle.
 1762                          */
 1763                         pipe->idle_heap.size = pipe->idle_heap.elements = 0;
 1764                         pipe->idle_heap.offset =
 1765                             offsetof(struct dn_flow_queue, heap_pos);
 1766                 } else
 1767                         /* Flush accumulated credit for all queues. */
 1768                         for (i = 0; i <= pipe->fs.rq_size; i++)
 1769                                 for (q = pipe->fs.rq[i]; q; q = q->next)
 1770                                         q->numbytes = io_fast ? p->bandwidth : 0;
 1771 
 1772                 pipe->bandwidth = p->bandwidth;
 1773                 pipe->numbytes = 0;             /* just in case... */
 1774                 bcopy(p->if_name, pipe->if_name, sizeof(p->if_name));
 1775                 pipe->ifp = NULL;               /* reset interface ptr */
 1776                 pipe->delay = p->delay;
 1777                 set_fs_parms(&(pipe->fs), pfs);
 1778 
 1779                 if (pipe->fs.rq == NULL) {      /* a new pipe */
 1780                         error = alloc_hash(&(pipe->fs), pfs);
 1781                         if (error) {
 1782                                 DUMMYNET_UNLOCK();
 1783                                 free(pipe, M_DUMMYNET);
 1784                                 return (error);
 1785                         }
 1786                         SLIST_INSERT_HEAD(&pipehash[HASH(pipe->pipe_nr)],
 1787                             pipe, next);
 1788                 }
 1789                 DUMMYNET_UNLOCK();
 1790         } else {                                /* config queue */
 1791                 struct dn_flow_set *fs;
 1792 
 1793                 DUMMYNET_LOCK();
 1794                 fs = locate_flowset(pfs->fs_nr); /* locate flow_set */
 1795 
 1796                 if (fs == NULL) {               /* new */
 1797                         if (pfs->parent_nr == 0) { /* need link to a pipe */
 1798                                 DUMMYNET_UNLOCK();
 1799                                 return (EINVAL);
 1800                         }
 1801                         fs = malloc(sizeof(struct dn_flow_set), M_DUMMYNET,
 1802                             M_NOWAIT | M_ZERO);
 1803                         if (fs == NULL) {
 1804                                 DUMMYNET_UNLOCK();
 1805                                 printf(
 1806                                     "dummynet: no memory for new flow_set\n");
 1807                                 return (ENOMEM);
 1808                         }
 1809                         fs->fs_nr = pfs->fs_nr;
 1810                         fs->parent_nr = pfs->parent_nr;
 1811                         fs->weight = pfs->weight;
 1812                         if (fs->weight == 0)
 1813                                 fs->weight = 1;
 1814                         else if (fs->weight > 100)
 1815                                 fs->weight = 100;
 1816                 } else {
 1817                         /*
 1818                          * Change parent pipe not allowed;
 1819                          * must delete and recreate.
 1820                          */
 1821                         if (pfs->parent_nr != 0 &&
 1822                             fs->parent_nr != pfs->parent_nr) {
 1823                                 DUMMYNET_UNLOCK();
 1824                                 return (EINVAL);
 1825                         }
 1826                 }
 1827 
 1828                 set_fs_parms(fs, pfs);
 1829 
 1830                 if (fs->rq == NULL) {           /* a new flow_set */
 1831                         error = alloc_hash(fs, pfs);
 1832                         if (error) {
 1833                                 DUMMYNET_UNLOCK();
 1834                                 free(fs, M_DUMMYNET);
 1835                                 return (error);
 1836                         }
 1837                         SLIST_INSERT_HEAD(&flowsethash[HASH(fs->fs_nr)],
 1838                             fs, next);
 1839                 }
 1840                 DUMMYNET_UNLOCK();
 1841         }
 1842         return (0);
 1843 }
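      /*
       * Example of the two entry styles handled above (illustrative ipfw
       * invocations): "ipfw pipe 7 config bw 1Mbit/s" arrives here with
       * p->pipe_nr == 7 and pfs->fs_nr == 0, while "ipfw queue 9 config
       * pipe 7 weight 30" arrives with p->pipe_nr == 0, pfs->fs_nr == 9
       * and pfs->parent_nr == 7.
       */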
 1844 
 1845 /*
 1846  * Helper function to remove from a heap all queues that are linked to
 1847  * a flow_set about to be deleted.
 1848  */
 1849 static void
 1850 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
 1851 {
 1852     int i = 0, found = 0;
 1853     for (; i < h->elements;)
 1854         if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
 1855             h->elements--;
 1856             h->p[i] = h->p[h->elements];
 1857             found++;
 1858         } else
 1859             i++;
 1860     if (found)
 1861         heapify(h);
 1862 }
 1863 
 1864 /*
 1865  * Helper function to remove a pipe from a heap (it can be there at most once).
 1866  */
 1867 static void
 1868 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
 1869 {
 1870     if (h->elements > 0) {
 1871         int i;
 1872         for (i = 0; i < h->elements; i++) {
 1873             if (h->p[i].object == p) { /* found it */
 1874                 h->elements--;
 1875                 h->p[i] = h->p[h->elements];
 1876                 heapify(h);
 1877                 break;
 1878             }
 1879         }
 1880     }
 1881 }
 1882 
 1883 /*
 1884  * Drain all queues. Called in case of severe mbuf shortage.
 1885  */
 1886 void
 1887 dummynet_drain(void)
 1888 {
 1889     struct dn_flow_set *fs;
 1890     struct dn_pipe *pipe;
 1891     struct mbuf *m, *mnext;
 1892     int i;
 1893 
 1894     DUMMYNET_LOCK_ASSERT();
 1895 
 1896     heap_free(&ready_heap);
 1897     heap_free(&wfq_ready_heap);
 1898     heap_free(&extract_heap);
 1899     /* Purge all queued packets from every flow_set. */
 1900     for (i = 0; i < HASHSIZE; i++)
 1901         SLIST_FOREACH(fs, &flowsethash[i], next)
 1902                 purge_flow_set(fs, 0);
 1903 
 1904     for (i = 0; i < HASHSIZE; i++) {
 1905         SLIST_FOREACH(pipe, &pipehash[i], next) {
 1906                 purge_flow_set(&(pipe->fs), 0);
 1907 
 1908                 mnext = pipe->head;
 1909                 while ((m = mnext) != NULL) {
 1910                         mnext = m->m_nextpkt;
 1911                         DN_FREE_PKT(m);
 1912                 }
 1913                 pipe->head = pipe->tail = NULL;
 1914         }
 1915     }
 1916 }
 1917 
 1918 /*
 1919  * Fully delete a pipe or a queue, cleaning up associated info.
 1920  */
 1921 static int
 1922 delete_pipe(struct dn_pipe *p)
 1923 {
 1924 
 1925     if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
 1926         return (EINVAL);
 1927     if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
 1928         return (EINVAL);
 1929     if (p->pipe_nr != 0) { /* this is an old-style pipe */
 1930         struct dn_pipe *pipe;
 1931         struct dn_flow_set *fs;
 1932         int i;
 1933 
 1934         DUMMYNET_LOCK();
 1935         pipe = locate_pipe(p->pipe_nr); /* locate pipe */
 1936 
 1937         if (pipe == NULL) {
 1938             DUMMYNET_UNLOCK();
 1939             return (ENOENT);    /* not found */
 1940         }
 1941 
 1942         /* Unlink from list of pipes. */
 1943         SLIST_REMOVE(&pipehash[HASH(pipe->pipe_nr)], pipe, dn_pipe, next);
 1944 
 1945         /* Remove all references to this pipe from flow_sets. */
 1946         for (i = 0; i < HASHSIZE; i++)
 1947             SLIST_FOREACH(fs, &flowsethash[i], next)
 1948                 if (fs->pipe == pipe) {
 1949                         printf("dummynet: ++ ref to pipe %d from fs %d\n",
 1950                             p->pipe_nr, fs->fs_nr);
 1951                         fs->pipe = NULL;
 1952                         purge_flow_set(fs, 0);
 1953                 }
 1954         fs_remove_from_heap(&ready_heap, &(pipe->fs));
 1955         purge_pipe(pipe); /* remove all data associated to this pipe */
 1956         /* Remove references to this pipe from extract_heap and wfq_ready_heap. */
 1957         pipe_remove_from_heap(&extract_heap, pipe);
 1958         pipe_remove_from_heap(&wfq_ready_heap, pipe);
 1959         DUMMYNET_UNLOCK();
 1960 
 1961         free(pipe, M_DUMMYNET);
 1962     } else { /* this is a WF2Q queue (dn_flow_set) */
 1963         struct dn_flow_set *fs;
 1964 
 1965         DUMMYNET_LOCK();
 1966         fs = locate_flowset(p->fs.fs_nr); /* locate set */
 1967 
 1968         if (fs == NULL) {
 1969             DUMMYNET_UNLOCK();
 1970             return (ENOENT); /* not found */
 1971         }
 1972 
 1973         /* Unlink from list of flowsets. */
 1974         SLIST_REMOVE(&flowsethash[HASH(fs->fs_nr)], fs, dn_flow_set, next);
 1975 
 1976         if (fs->pipe != NULL) {
 1977             /* Update total weight on parent pipe and cleanup parent heaps. */
 1978             fs->pipe->sum -= fs->weight * fs->backlogged;
 1979             fs_remove_from_heap(&(fs->pipe->not_eligible_heap), fs);
 1980             fs_remove_from_heap(&(fs->pipe->scheduler_heap), fs);
 1981 #if 1   /* XXX should I remove from idle_heap as well? */
 1982             fs_remove_from_heap(&(fs->pipe->idle_heap), fs);
 1983 #endif
 1984         }
 1985         purge_flow_set(fs, 1);
 1986         DUMMYNET_UNLOCK();
 1987     }
 1988     return (0);
 1989 }
 1990 
 1991 /*
 1992  * Helper function used to copy data from the kernel in DUMMYNET_GET.
 1993  */
 1994 static char *
 1995 dn_copy_set(struct dn_flow_set *set, char *bp)
 1996 {
 1997     int i, copied = 0;
 1998     struct dn_flow_queue *q, *qp = (struct dn_flow_queue *)bp;
 1999 
 2000     DUMMYNET_LOCK_ASSERT();
 2001 
 2002     for (i = 0; i <= set->rq_size; i++)
 2003         for (q = set->rq[i]; q; q = q->next, qp++) {
 2004             if (q->hash_slot != i)
 2005                 printf("dummynet: ++ at %d: wrong slot (have %d, "
 2006                     "should be %d)\n", copied, q->hash_slot, i);
 2007             if (q->fs != set)
 2008                 printf("dummynet: ++ at %d: wrong fs ptr (have %p, "
 2009                     "should be %p)\n", i, q->fs, set);
 2010             copied++;
 2011             bcopy(q, qp, sizeof(*q));
 2012             /* cleanup pointers */
 2013             qp->next = NULL;
 2014             qp->head = qp->tail = NULL;
 2015             qp->fs = NULL;
 2016         }
 2017     if (copied != set->rq_elements)
 2018         printf("dummynet: ++ wrong count, have %d should be %d\n",
 2019             copied, set->rq_elements);
 2020     return ((char *)qp);
 2021 }
 2022 
 2023 static size_t
 2024 dn_calc_size(void)
 2025 {
 2026     struct dn_flow_set *fs;
 2027     struct dn_pipe *pipe;
 2028     size_t size = 0;
 2029     int i;
 2030 
 2031     DUMMYNET_LOCK_ASSERT();
 2032     /*
 2033      * Compute size of data structures: list of pipes and flow_sets.
 2034      */
 2035     for (i = 0; i < HASHSIZE; i++) {
 2036         SLIST_FOREACH(pipe, &pipehash[i], next)
 2037                 size += sizeof(*pipe) +
 2038                     pipe->fs.rq_elements * sizeof(struct dn_flow_queue);
 2039         SLIST_FOREACH(fs, &flowsethash[i], next)
 2040                 size += sizeof(*fs) +
 2041                     fs->rq_elements * sizeof(struct dn_flow_queue);
 2042     }
 2043     return (size);
 2044 }
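      /*
       * The size computed above must match, byte for byte, the layout that
       * dummynet_get() and dn_copy_set() emit below (each pipe or flow_set
       * descriptor followed by copies of its dn_flow_queue entries); this
       * is why dummynet_get() re-checks dn_calc_size() after reacquiring
       * the lock.
       */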
 2045 
 2046 static int
 2047 dummynet_get(struct sockopt *sopt)
 2048 {
 2049     char *buf, *bp;             /* bp is the "copy-pointer" */
 2050     size_t size;
 2051     struct dn_flow_set *fs;
 2052     struct dn_pipe *pipe;
 2053     int error = 0, i;
 2054 
 2055     /* XXX lock held too long */
 2056     DUMMYNET_LOCK();
 2057     /*
 2058      * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we
 2059      *      cannot use this flag while holding a mutex.
 2060      */
 2061     for (i = 0; i < 10; i++) {
 2062         size = dn_calc_size();
 2063         DUMMYNET_UNLOCK();
 2064         buf = malloc(size, M_TEMP, M_WAITOK);
 2065         DUMMYNET_LOCK();
 2066         if (size == dn_calc_size())
 2067                 break;
 2068         free(buf, M_TEMP);
 2069         buf = NULL;
 2070     }
 2071     if (buf == NULL) {
 2072         DUMMYNET_UNLOCK();
 2073         return (ENOBUFS);
 2074     }
 2075     bp = buf;
 2076     for (i = 0; i < HASHSIZE; i++)
 2077         SLIST_FOREACH(pipe, &pipehash[i], next) {
 2078                 struct dn_pipe *pipe_bp = (struct dn_pipe *)bp;
 2079 
 2080                 /*
 2081                  * Copy pipe descriptor into *bp, convert delay back to ms,
 2082                  * then copy the flow_set descriptor(s) one at a time.
 2083                  * After each flow_set, copy the queue descriptor it owns.
 2084                  */
 2085                 bcopy(pipe, bp, sizeof(*pipe));
 2086                 pipe_bp->delay = (pipe_bp->delay * 1000) / hz;
 2087                 /*
 2088                  * XXX the following is a hack based on ->next being the
 2089                  * first field in dn_pipe and dn_flow_set. The correct
 2090                  * solution would be to move the dn_flow_set to the beginning
 2091                  * of struct dn_pipe.
 2092                  */
 2093                 pipe_bp->next.sle_next = (struct dn_pipe *)DN_IS_PIPE;
 2094                 /* Clean pointers. */
 2095                 pipe_bp->head = pipe_bp->tail = NULL;
 2096                 pipe_bp->fs.next.sle_next = NULL;
 2097                 pipe_bp->fs.pipe = NULL;
 2098                 pipe_bp->fs.rq = NULL;
 2099 
 2100                 bp += sizeof(*pipe);
 2101                 bp = dn_copy_set(&(pipe->fs), bp);
 2102         }
 2103 
 2104     for (i = 0; i < HASHSIZE; i++)
 2105         SLIST_FOREACH(fs, &flowsethash[i], next) {
 2106                 struct dn_flow_set *fs_bp = (struct dn_flow_set *)bp;
 2107 
 2108                 bcopy(fs, bp, sizeof(*fs));
 2109                 /* XXX same hack as above */
 2110                 fs_bp->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE;
 2111                 fs_bp->pipe = NULL;
 2112                 fs_bp->rq = NULL;
 2113                 bp += sizeof(*fs);
 2114                 bp = dn_copy_set(fs, bp);
 2115         }
 2116 
 2117     DUMMYNET_UNLOCK();
 2118 
 2119     error = sooptcopyout(sopt, buf, size);
 2120     free(buf, M_TEMP);
 2121     return (error);
 2122 }
 2123 
 2124 /*
 2125  * Handler for the various dummynet socket options (get, flush, config, del)
 2126  */
 2127 static int
 2128 ip_dn_ctl(struct sockopt *sopt)
 2129 {
 2130     int error = 0;
 2131     struct dn_pipe *p, tmp_pipe;
 2132 
 2133     error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
 2134     if (error)
 2135         return (error);
 2136 
 2137     /* Disallow sets in really-really secure mode. */
 2138     if (sopt->sopt_dir == SOPT_SET) {
 2139 #if __FreeBSD_version >= 500034
 2140         error =  securelevel_ge(sopt->sopt_td->td_ucred, 3);
 2141         if (error)
 2142             return (error);
 2143 #else
 2144         if (securelevel >= 3)
 2145             return (EPERM);
 2146 #endif
 2147     }
 2148 
 2149     switch (sopt->sopt_name) {
 2150     default:
 2151         printf("dummynet: -- unknown option %d\n", sopt->sopt_name);
 2152         return (EINVAL);
 2153 
 2154     case IP_DUMMYNET_GET:
 2155         error = dummynet_get(sopt);
 2156         break;
 2157 
 2158     case IP_DUMMYNET_FLUSH:
 2159         dummynet_flush();
 2160         break;
 2161 
 2162     case IP_DUMMYNET_CONFIGURE:
 2163         p = &tmp_pipe;
 2164         error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
 2165         if (error)
 2166             break;
 2167         error = config_pipe(p);
 2168         break;
 2169 
 2170     case IP_DUMMYNET_DEL:       /* remove a pipe or queue */
 2171         p = &tmp_pipe;
 2172         error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
 2173         if (error)
 2174             break;
 2175 
 2176         error = delete_pipe(p);
 2177         break;
 2178     }
 2179     return (error);
 2180 }
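      /*
       * A minimal user-level sketch of how these options are exercised
       * (hypothetical snippet; ipfw(8) is the normal consumer):
       *
       *    struct dn_pipe p = { .pipe_nr = 1, .bandwidth = 500000 };
       *    int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
       *    setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE, &p, sizeof(p));
       */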
 2181 
 2182 static void
 2183 ip_dn_init(void)
 2184 {
 2185         int i;
 2186 
 2187         if (bootverbose)
 2188                 printf("DUMMYNET with IPv6 initialized (040826)\n");
 2189 
 2190         DUMMYNET_LOCK_INIT();
 2191 
 2192         for (i = 0; i < HASHSIZE; i++) {
 2193                 SLIST_INIT(&pipehash[i]);
 2194                 SLIST_INIT(&flowsethash[i]);
 2195         }
 2196         ready_heap.size = ready_heap.elements = 0;
 2197         ready_heap.offset = 0;
 2198 
 2199         wfq_ready_heap.size = wfq_ready_heap.elements = 0;
 2200         wfq_ready_heap.offset = 0;
 2201 
 2202         extract_heap.size = extract_heap.elements = 0;
 2203         extract_heap.offset = 0;
 2204 
 2205         ip_dn_ctl_ptr = ip_dn_ctl;
 2206         ip_dn_io_ptr = dummynet_io;
 2207         ip_dn_ruledel_ptr = dn_rule_delete;
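              /*
               * Publishing these hooks is what activates dummynet: the
               * callers (ipfw and the raw-IP socket option path) invoke
               * ip_dn_io_ptr, ip_dn_ctl_ptr and ip_dn_ruledel_ptr only when
               * they are non-NULL, and ip_dn_destroy() clears them again on
               * module unload.
               */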
 2208 
 2209         TASK_INIT(&dn_task, 0, dummynet_task, NULL);
 2210         dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
 2211             taskqueue_thread_enqueue, &dn_tq);
 2212         taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
 2213 
 2214         callout_init(&dn_timeout, CALLOUT_MPSAFE);
 2215         callout_reset(&dn_timeout, 1, dummynet, NULL);
 2216 
 2217         /* Initialize curr_time adjustment mechanics. */
 2218         getmicrouptime(&prev_t);
 2219 }
 2220 
 2221 #ifdef KLD_MODULE
 2222 static void
 2223 ip_dn_destroy(void)
 2224 {
 2225         ip_dn_ctl_ptr = NULL;
 2226         ip_dn_io_ptr = NULL;
 2227         ip_dn_ruledel_ptr = NULL;
 2228 
 2229         DUMMYNET_LOCK();
 2230         callout_stop(&dn_timeout);
 2231         DUMMYNET_UNLOCK();
 2232         taskqueue_drain(dn_tq, &dn_task);
 2233         taskqueue_free(dn_tq);
 2234 
 2235         dummynet_flush();
 2236 
 2237         DUMMYNET_LOCK_DESTROY();
 2238 }
 2239 #endif /* KLD_MODULE */
 2240 
 2241 static int
 2242 dummynet_modevent(module_t mod, int type, void *data)
 2243 {
 2244 
 2245         switch (type) {
 2246         case MOD_LOAD:
 2247                 if (DUMMYNET_LOADED) {
 2248                         printf("DUMMYNET already loaded\n");
 2249                         return (EEXIST);
 2250                 }
 2251                 ip_dn_init();
 2252                 break;
 2253 
 2254         case MOD_UNLOAD:
 2255 #if !defined(KLD_MODULE)
 2256                 printf("dummynet statically compiled, cannot unload\n");
 2257                 return (EINVAL);
 2258 #else
 2259                 ip_dn_destroy();
 2260 #endif
 2261                 break;
 2262         default:
 2263                 return (EOPNOTSUPP);
 2264                 break;
 2265         }
 2266         return (0);
 2267 }
 2268 
 2269 static moduledata_t dummynet_mod = {
 2270         "dummynet",
 2271         dummynet_modevent,
 2272         NULL
 2273 };
 2274 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
 2275 MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
 2276 MODULE_VERSION(dummynet, 1);
