FreeBSD/Linux Kernel Cross Reference
sys/netinet/ip_dummynet.c


    1 /*-
    2  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
    3  * Portions Copyright (c) 2000 Akamba Corp.
    4  * All rights reserved
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  * $FreeBSD: releng/6.3/sys/netinet/ip_dummynet.c 167770 2007-03-21 17:25:15Z oleg $
   28  */
   29 
   30 #define DUMMYNET_DEBUG
   31 
   32 #include "opt_inet6.h"
   33 
   34 /*
   35  * This module implements IP dummynet, a bandwidth limiter/delay emulator
   36  * used in conjunction with the ipfw package.
   37  * Description of the data structures used is in ip_dummynet.h
   38  * Here you mainly find the following blocks of code:
   39  *  + variable declarations;
   40  *  + heap management functions;
   41  *  + scheduler and dummynet functions;
   42  *  + configuration and initialization.
   43  *
   44  * NOTA BENE: critical sections are protected by the "dummynet lock".
   45  *
   46  * Most important Changes:
   47  *
   48  * 011004: KLDable
   49  * 010124: Fixed WF2Q behaviour
   50  * 010122: Fixed spl protection.
   51  * 000601: WF2Q support
   52  * 000106: large rewrite, use heaps to handle very many pipes.
    53  * 980513: initial release
   54  *
   55  * include files marked with XXX are probably not needed
   56  */
   57 
   58 #include <sys/param.h>
   59 #include <sys/systm.h>
   60 #include <sys/malloc.h>
   61 #include <sys/mbuf.h>
   62 #include <sys/kernel.h>
   63 #include <sys/module.h>
   64 #include <sys/proc.h>
   65 #include <sys/socket.h>
   66 #include <sys/socketvar.h>
   67 #include <sys/time.h>
   68 #include <sys/sysctl.h>
   69 #include <sys/taskqueue.h>
   70 #include <net/if.h>
   71 #include <net/netisr.h>
   72 #include <net/route.h>
   73 #include <netinet/in.h>
   74 #include <netinet/in_systm.h>
   75 #include <netinet/in_var.h>
   76 #include <netinet/ip.h>
   77 #include <netinet/ip_fw.h>
   78 #include <netinet/ip_dummynet.h>
   79 #include <netinet/ip_var.h>
   80 
   81 #include <netinet/if_ether.h> /* for struct arpcom */
   82 #include <net/bridge.h>
   83 
   84 #include <netinet/ip6.h>       /* for ip6_input, ip6_output prototypes */
   85 #include <netinet6/ip6_var.h>
   86 
   87 /*
   88  * We keep a private variable for the simulation time, but we could
   89  * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
   90  */
   91 static dn_key curr_time = 0 ; /* current simulation time */
   92 
   93 static int dn_hash_size = 64 ;  /* default hash size */
   94 
   95 /* statistics on number of queue searches and search steps */
   96 static long searches, search_steps ;
   97 static int pipe_expire = 1 ;   /* expire queue if empty */
   98 static int dn_max_ratio = 16 ; /* max queues/buckets ratio */
   99 
  100 static int red_lookup_depth = 256;      /* RED - default lookup table depth */
  101 static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
  102 static int red_max_pkt_size = 1500;     /* RED - default max packet size */
  103 
  104 static struct timeval prev_t, t;
  105 static long tick_last;                  /* Last tick duration (usec). */
  106 static long tick_delta;                 /* Last vs standard tick diff (usec). */
  107 static long tick_delta_sum;             /* Accumulated tick difference (usec).*/
  108 static long tick_adjustment;            /* Tick adjustments done. */
  109 static long tick_lost;                  /* Lost(coalesced) ticks number. */
  110 /* Adjusted vs non-adjusted curr_time difference (ticks). */
  111 static long tick_diff;
  112 
  113 /*
  114  * Three heaps contain queues and pipes that the scheduler handles:
  115  *
  116  * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
  117  *
  118  * wfq_ready_heap contains the pipes associated with WF2Q flows
  119  *
  120  * extract_heap contains pipes associated with delay lines.
  121  *
  122  */
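       /*
        * All three heaps are keyed by a dn_key timestamp measured in ticks,
        * so one scheduler pass reduces to popping every entry whose key is
        * <= curr_time. A minimal sketch of the pattern (the real loop is in
        * dummynet_task() below):
        *
        *      while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
        *              void *obj = h->p[0].object;
        *              heap_extract(h, NULL);
        *              ... dispatch obj to ready_event() / ready_event_wfq() /
        *              transmit_event(), depending on the heap ...
        *      }
        */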
  123 
  124 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
  125 
  126 static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;
  127 
  128 static int      heap_init(struct dn_heap *h, int size);
  129 static int      heap_insert (struct dn_heap *h, dn_key key1, void *p);
  130 static void     heap_extract(struct dn_heap *h, void *obj);
  131 static void     transmit_event(struct dn_pipe *pipe, struct mbuf **head,
  132                     struct mbuf **tail);
  133 static void     ready_event(struct dn_flow_queue *q, struct mbuf **head,
  134                     struct mbuf **tail);
  135 static void     ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
  136                     struct mbuf **tail);
  137 
  138 #define HASHSIZE        16
  139 #define HASH(num)       ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
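       /*
        * Worked example (illustrative): HASH(0x123) folds the nibbles of
        * the number together, (0x1 ^ 0x12 ^ 0x123) & 0x0f == 0, while
        * HASH(0x103) == 2, so pipe numbers sharing the low nibble still
        * spread across the 16 buckets.
        */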
  140 static struct dn_pipe_head      pipehash[HASHSIZE];     /* all pipes */
  141 static struct dn_flow_set_head  flowsethash[HASHSIZE];  /* all flowsets */
  142 
  143 static struct callout dn_timeout;
  144 
  145 extern  void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
  146 
  147 #ifdef SYSCTL_NODE
  148 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
  149 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
  150     CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
  151 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, curr_time,
  152     CTLFLAG_RD, &curr_time, 0, "Current tick");
  153 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
  154     CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
  155 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
  156     CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
  157 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, searches,
  158     CTLFLAG_RD, &searches, 0, "Number of queue searches");
  159 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, search_steps,
  160     CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
  161 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
  162     CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
  163 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
  164     CTLFLAG_RW, &dn_max_ratio, 0,
  165     "Max ratio between dynamic queues and buckets");
  166 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
  167     CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
  168 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
  169     CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
  170 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
  171     CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
  172 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
  173     CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
  174 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
  175     CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
  176 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
  177     CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
  178 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
  179     CTLFLAG_RD, &tick_diff, 0,
  180     "Adjusted vs non-adjusted curr_time difference (ticks).");
  181 SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
  182     CTLFLAG_RD, &tick_lost, 0,
  183     "Number of ticks coalesced by dummynet taskqueue.");
  184 #endif
  185 
  186 #ifdef DUMMYNET_DEBUG
  187 int     dummynet_debug = 0;
  188 #ifdef SYSCTL_NODE
  189 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug,
  190             0, "control debugging printfs");
  191 #endif
  192 #define DPRINTF(X)      if (dummynet_debug) printf X
  193 #else
  194 #define DPRINTF(X)
  195 #endif
  196 
  197 static struct task      dn_task;
  198 static struct taskqueue *dn_tq = NULL;
  199 static void dummynet_task(void *, int);
  200 
  201 static struct mtx dummynet_mtx;
  202 #define DUMMYNET_LOCK_INIT() \
  203         mtx_init(&dummynet_mtx, "dummynet", NULL, MTX_DEF)
  204 #define DUMMYNET_LOCK_DESTROY() mtx_destroy(&dummynet_mtx)
  205 #define DUMMYNET_LOCK()         mtx_lock(&dummynet_mtx)
  206 #define DUMMYNET_UNLOCK()       mtx_unlock(&dummynet_mtx)
  207 #define DUMMYNET_LOCK_ASSERT()  do {                            \
  208         mtx_assert(&dummynet_mtx, MA_OWNED);                    \
  209         NET_ASSERT_GIANT();                                     \
  210 } while (0)
  211 
  212 static int config_pipe(struct dn_pipe *p);
  213 static int ip_dn_ctl(struct sockopt *sopt);
  214 
  215 static void dummynet(void *);
  216 static void dummynet_flush(void);
  217 static void dummynet_send(struct mbuf *);
  218 void dummynet_drain(void);
  219 static ip_dn_io_t dummynet_io;
  220 static void dn_rule_delete(void *);
  221 
  222 /*
  223  * Heap management functions.
  224  *
  225  * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
  226  * Some macros help finding parent/children so we can optimize them.
  227  *
  228  * heap_init() is called to expand the heap when needed.
  229  * Increment size in blocks of 16 entries.
  230  * XXX failure to allocate a new element is a pretty bad failure
  231  * as we basically stall a whole queue forever!!
  232  * Returns 1 on error, 0 on success
  233  */
  234 #define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
  235 #define HEAP_LEFT(x) ( 2*(x) + 1 )
  236 #define HEAP_IS_LEFT(x) ( (x) & 1 )
  237 #define HEAP_RIGHT(x) ( 2*(x) + 2 )
  238 #define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
  239 #define HEAP_INCREMENT  15
  240 
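       /*
        * Worked example of the index arithmetic (illustrative): for node
        * x = 4, HEAP_FATHER(4) = (4 - 1) / 2 = 1, HEAP_LEFT(4) = 9 and
        * HEAP_RIGHT(4) = 10; HEAP_IS_LEFT(9) is true. With
        * HEAP_INCREMENT == 15, heap_init() below rounds a request for 20
        * entries up to (20 + 15) & ~15 = 32, so the heap always grows to a
        * multiple of 16.
        */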
  241 static int
  242 heap_init(struct dn_heap *h, int new_size)
  243 {
  244     struct dn_heap_entry *p;
  245 
  246     if (h->size >= new_size ) {
  247         printf("dummynet: %s, Bogus call, have %d want %d\n", __func__,
  248                 h->size, new_size);
  249         return 0 ;
  250     }
  251     new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
  252     p = malloc(new_size * sizeof(*p), M_DUMMYNET, M_NOWAIT);
  253     if (p == NULL) {
  254         printf("dummynet: %s, resize %d failed\n", __func__, new_size );
  255         return 1 ; /* error */
  256     }
  257     if (h->size > 0) {
  258         bcopy(h->p, p, h->size * sizeof(*p) );
  259         free(h->p, M_DUMMYNET);
  260     }
  261     h->p = p ;
  262     h->size = new_size ;
  263     return 0 ;
  264 }
  265 
  266 /*
  267  * Insert element in heap. Normally, p != NULL, we insert p in
  268  * a new position and bubble up. If p == NULL, then the element is
  269  * already in place, and key is the position where to start the
  270  * bubble-up.
  271  * Returns 1 on failure (cannot allocate new heap entry)
  272  *
  273  * If offset > 0 the position (index, int) of the element in the heap is
  274  * also stored in the element itself at the given offset in bytes.
  275  */
  276 #define SET_OFFSET(heap, node) \
  277     if (heap->offset > 0) \
  278             *((int *)((char *)(heap->p[node].object) + heap->offset)) = node ;
  279 /*
  280  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
  281  */
  282 #define RESET_OFFSET(heap, node) \
  283     if (heap->offset > 0) \
  284             *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
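       /*
        * A minimal sketch of how a heap is wired up to use this mechanism
        * (illustrative; "pos" stands for whatever int field the stored
        * object reserves for its heap index):
        *
        *      struct obj {
        *              ...
        *              int pos;        (index into h->p[], kept by SET_OFFSET)
        *      };
        *      h->offset = (int)offsetof(struct obj, pos);
        *
        * With offset > 0, heap_extract() can then remove an arbitrary
        * element in O(log n), since the element records where it sits.
        */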
  285 static int
  286 heap_insert(struct dn_heap *h, dn_key key1, void *p)
  287 {
  288     int son = h->elements ;
  289 
  290     if (p == NULL)      /* data already there, set starting point */
  291         son = key1 ;
  292     else {              /* insert new element at the end, possibly resize */
  293         son = h->elements ;
  294         if (son == h->size) /* need resize... */
  295             if (heap_init(h, h->elements+1) )
  296                 return 1 ; /* failure... */
  297         h->p[son].object = p ;
  298         h->p[son].key = key1 ;
  299         h->elements++ ;
  300     }
  301     while (son > 0) {                           /* bubble up */
  302         int father = HEAP_FATHER(son) ;
  303         struct dn_heap_entry tmp  ;
  304 
  305         if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
  306             break ; /* found right position */
  307         /* son smaller than father, swap and repeat */
  308         HEAP_SWAP(h->p[son], h->p[father], tmp) ;
  309         SET_OFFSET(h, son);
  310         son = father ;
  311     }
  312     SET_OFFSET(h, son);
  313     return 0 ;
  314 }
  315 
  316 /*
  317  * remove top element from heap, or obj if obj != NULL
  318  */
  319 static void
  320 heap_extract(struct dn_heap *h, void *obj)
  321 {
  322     int child, father, max = h->elements - 1 ;
  323 
  324     if (max < 0) {
   325         printf("dummynet: warning, extract from empty heap %p\n", h);
  326         return ;
  327     }
  328     father = 0 ; /* default: move up smallest child */
  329     if (obj != NULL) { /* extract specific element, index is at offset */
  330         if (h->offset <= 0)
  331             panic("dummynet: heap_extract from middle not supported on this heap!!!\n");
  332         father = *((int *)((char *)obj + h->offset)) ;
  333         if (father < 0 || father >= h->elements) {
  334             printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
  335                 father, h->elements);
  336             panic("dummynet: heap_extract");
  337         }
  338     }
  339     RESET_OFFSET(h, father);
  340     child = HEAP_LEFT(father) ;         /* left child */
  341     while (child <= max) {              /* valid entry */
  342         if (child != max && DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
  343             child = child+1 ;           /* take right child, otherwise left */
  344         h->p[father] = h->p[child] ;
  345         SET_OFFSET(h, father);
  346         father = child ;
  347         child = HEAP_LEFT(child) ;   /* left child for next loop */
  348     }
  349     h->elements-- ;
  350     if (father != max) {
  351         /*
  352          * Fill hole with last entry and bubble up, reusing the insert code
  353          */
  354         h->p[father] = h->p[max] ;
  355         heap_insert(h, father, NULL); /* this one cannot fail */
  356     }
  357 }
  358 
  359 #if 0
  360 /*
  361  * change object position and update references
  362  * XXX this one is never used!
  363  */
  364 static void
  365 heap_move(struct dn_heap *h, dn_key new_key, void *object)
  366 {
  367     int temp;
  368     int i ;
  369     int max = h->elements-1 ;
  370     struct dn_heap_entry buf ;
  371 
  372     if (h->offset <= 0)
  373         panic("cannot move items on this heap");
  374 
  375     i = *((int *)((char *)object + h->offset));
  376     if (DN_KEY_LT(new_key, h->p[i].key) ) { /* must move up */
  377         h->p[i].key = new_key ;
  378         for (; i>0 && DN_KEY_LT(new_key, h->p[(temp = HEAP_FATHER(i))].key) ;
  379                  i = temp ) { /* bubble up */
  380             HEAP_SWAP(h->p[i], h->p[temp], buf) ;
  381             SET_OFFSET(h, i);
  382         }
  383     } else {            /* must move down */
  384         h->p[i].key = new_key ;
  385         while ( (temp = HEAP_LEFT(i)) <= max ) { /* found left child */
  386             if ((temp != max) && DN_KEY_GT(h->p[temp].key, h->p[temp+1].key))
  387                 temp++ ; /* select child with min key */
  388             if (DN_KEY_GT(new_key, h->p[temp].key)) { /* go down */
  389                 HEAP_SWAP(h->p[i], h->p[temp], buf) ;
  390                 SET_OFFSET(h, i);
  391             } else
  392                 break ;
  393             i = temp ;
  394         }
  395     }
  396     SET_OFFSET(h, i);
  397 }
  398 #endif /* heap_move, unused */
  399 
  400 /*
  401  * heapify() will reorganize data inside an array to maintain the
  402  * heap property. It is needed when we delete a bunch of entries.
  403  */
  404 static void
  405 heapify(struct dn_heap *h)
  406 {
  407     int i ;
  408 
  409     for (i = 0 ; i < h->elements ; i++ )
  410         heap_insert(h, i , NULL) ;
  411 }
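       /*
        * The loop above is the standard O(n log n) sift-up construction:
        * when element i is bubbled up, elements 0..i-1 already satisfy the
        * heap property, so each insertion preserves the invariant.
        */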
  412 
  413 /*
  414  * cleanup the heap and free data structure
  415  */
  416 static void
  417 heap_free(struct dn_heap *h)
  418 {
  419     if (h->size >0 )
  420         free(h->p, M_DUMMYNET);
  421     bzero(h, sizeof(*h) );
  422 }
  423 
  424 /*
  425  * --- end of heap management functions ---
  426  */
  427 
  428 /*
  429  * Return the mbuf tag holding the dummynet state.  As an optimization
  430  * this is assumed to be the first tag on the list.  If this turns out
  431  * wrong we'll need to search the list.
  432  */
  433 static struct dn_pkt_tag *
  434 dn_tag_get(struct mbuf *m)
  435 {
  436     struct m_tag *mtag = m_tag_first(m);
  437     KASSERT(mtag != NULL &&
  438             mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
  439             mtag->m_tag_id == PACKET_TAG_DUMMYNET,
  440             ("packet on dummynet queue w/o dummynet tag!"));
  441     return (struct dn_pkt_tag *)(mtag+1);
  442 }
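       /*
        * Should the first-tag assumption ever break, a defensive variant
        * could search the whole tag chain instead, e.g. (sketch):
        *
        *      mtag = m_tag_locate(m, MTAG_ABI_COMPAT,
        *          PACKET_TAG_DUMMYNET, NULL);
        */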
  443 
  444 /*
  445  * Scheduler functions:
  446  *
  447  * transmit_event() is called when the delay-line needs to enter
  448  * the scheduler, either because of existing pkts getting ready,
  449  * or new packets entering the queue. The event handled is the delivery
  450  * time of the packet.
  451  *
  452  * ready_event() does something similar with fixed-rate queues, and the
  453  * event handled is the finish time of the head pkt.
  454  *
  455  * wfq_ready_event() does something similar with WF2Q queues, and the
  456  * event handled is the start time of the head pkt.
  457  *
  458  * In all cases, we make sure that the data structures are consistent
  459  * before passing pkts out, because this might trigger recursive
  460  * invocations of the procedures.
  461  */
  462 static void
  463 transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
  464 {
  465         struct mbuf *m;
  466         struct dn_pkt_tag *pkt;
  467 
  468         DUMMYNET_LOCK_ASSERT();
  469 
  470         while ((m = pipe->head) != NULL) {
  471                 pkt = dn_tag_get(m);
  472                 if (!DN_KEY_LEQ(pkt->output_time, curr_time))
  473                         break;
  474 
  475                 pipe->head = m->m_nextpkt;
  476                 if (*tail != NULL)
  477                         (*tail)->m_nextpkt = m;
  478                 else
  479                         *head = m;
  480                 *tail = m;
  481         }
  482         if (*tail != NULL)
  483                 (*tail)->m_nextpkt = NULL;
  484 
  485         /* If there are leftover packets, put into the heap for next event. */
  486         if ((m = pipe->head) != NULL) {
  487                 pkt = dn_tag_get(m);
  488                 /*
  489                  * XXX: Should check errors on heap_insert, by draining the
  490                  * whole pipe p and hoping in the future we are more successful.
  491                  */
  492                 heap_insert(&extract_heap, pkt->output_time, pipe);
  493         }
  494 }
  495 
  496 /*
  497  * the following macro computes how many ticks we have to wait
  498  * before being able to transmit a packet. The credit is taken from
  499  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
  500  */
  501 #define SET_TICKS(_m, q, p)     \
  502     ((_m)->m_pkthdr.len*8*hz - (q)->numbytes + p->bandwidth - 1 ) / \
  503             p->bandwidth ;
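       /*
        * Worked example (illustrative): with hz = 1000, a 1500-byte packet
        * on a 1 Mbit/s pipe with no accumulated credit (numbytes == 0)
        * yields (1500*8*1000 + 1000000 - 1) / 1000000 = 12 ticks, i.e. the
        * 12 ms needed to clock 12000 bits out at 1 Mbit/s.
        */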
  504 
  505 /*
  506  * extract pkt from queue, compute output time (could be now)
  507  * and put into delay line (p_queue)
  508  */
  509 static void
  510 move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
  511         struct dn_pipe *p, int len)
  512 {
  513     struct dn_pkt_tag *dt = dn_tag_get(pkt);
  514 
  515     q->head = pkt->m_nextpkt ;
  516     q->len-- ;
  517     q->len_bytes -= len ;
  518 
  519     dt->output_time = curr_time + p->delay ;
  520 
  521     if (p->head == NULL)
  522         p->head = pkt;
  523     else
  524         p->tail->m_nextpkt = pkt;
  525     p->tail = pkt;
  526     p->tail->m_nextpkt = NULL;
  527 }
  528 
  529 /*
  530  * ready_event() is invoked every time the queue must enter the
  531  * scheduler, either because the first packet arrives, or because
  532  * a previously scheduled event fired.
   533  * On invocation, drain as many pkts as possible (could be 0) and then
  534  * if there are leftover packets reinsert the pkt in the scheduler.
  535  */
  536 static void
  537 ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
  538 {
  539     struct mbuf *pkt;
  540     struct dn_pipe *p = q->fs->pipe ;
  541     int p_was_empty ;
  542 
  543     DUMMYNET_LOCK_ASSERT();
  544 
  545     if (p == NULL) {
  546         printf("dummynet: ready_event- pipe is gone\n");
  547         return ;
  548     }
  549     p_was_empty = (p->head == NULL) ;
  550 
  551     /*
  552      * schedule fixed-rate queues linked to this pipe:
  553      * Account for the bw accumulated since last scheduling, then
  554      * drain as many pkts as allowed by q->numbytes and move to
  555      * the delay line (in p) computing output time.
  556      * bandwidth==0 (no limit) means we can drain the whole queue,
  557      * setting len_scaled = 0 does the job.
  558      */
  559     q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth;
  560     while ( (pkt = q->head) != NULL ) {
  561         int len = pkt->m_pkthdr.len;
  562         int len_scaled = p->bandwidth ? len*8*hz : 0 ;
  563         if (len_scaled > q->numbytes )
  564             break ;
  565         q->numbytes -= len_scaled ;
  566         move_pkt(pkt, q, p, len);
  567     }
  568     /*
  569      * If we have more packets queued, schedule next ready event
  570      * (can only occur when bandwidth != 0, otherwise we would have
  571      * flushed the whole queue in the previous loop).
  572      * To this purpose we record the current time and compute how many
  573      * ticks to go for the finish time of the packet.
  574      */
  575     if ( (pkt = q->head) != NULL ) { /* this implies bandwidth != 0 */
  576         dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
  577         q->sched_time = curr_time ;
  578         heap_insert(&ready_heap, curr_time + t, (void *)q );
  579         /* XXX should check errors on heap_insert, and drain the whole
  580          * queue on error hoping next time we are luckier.
  581          */
  582     } else {    /* RED needs to know when the queue becomes empty */
  583         q->q_time = curr_time;
  584         q->numbytes = 0;
  585     }
  586     /*
  587      * If the delay line was empty call transmit_event() now.
  588      * Otherwise, the scheduler will take care of it.
  589      */
  590     if (p_was_empty)
  591         transmit_event(p, head, tail);
  592 }
  593 
  594 /*
  595  * Called when we can transmit packets on WF2Q queues. Take pkts out of
  596  * the queues at their start time, and enqueue into the delay line.
   597  * Packets are drained as long as p->numbytes >= 0; each one goes
   598  * into the delay line with a deadline of p->delay. The packet that
   599  * drives p->numbytes below zero is the last one sent, and it is
   600  * charged an additional delay to pay back the overdrawn credit.
  601  */
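       /*
        * Worked example of the bookkeeping below (illustrative): with two
        * backlogged flows of weights 1 and 3 (so p->sum == 4), every packet
        * sent advances the virtual time by V += (len<<MY_M)/4, while the
        * sending flow's finish time advances by F += (len<<MY_M)/weight.
        * The weight-3 flow's F grows a third as fast, so it is picked three
        * times as often and receives three quarters of the bandwidth.
        */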
  602 static void
  603 ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
  604 {
  605     int p_was_empty = (p->head == NULL) ;
  606     struct dn_heap *sch = &(p->scheduler_heap);
  607     struct dn_heap *neh = &(p->not_eligible_heap) ;
  608 
  609     DUMMYNET_LOCK_ASSERT();
  610 
  611     if (p->if_name[0] == 0) /* tx clock is simulated */
  612         p->numbytes += ( curr_time - p->sched_time ) * p->bandwidth;
  613     else { /* tx clock is for real, the ifq must be empty or this is a NOP */
  614         if (p->ifp && p->ifp->if_snd.ifq_head != NULL)
  615             return ;
  616         else {
  617             DPRINTF(("dummynet: pipe %d ready from %s --\n",
  618                 p->pipe_nr, p->if_name));
  619         }
  620     }
  621 
  622     /*
  623      * While we have backlogged traffic AND credit, we need to do
  624      * something on the queue.
  625      */
  626     while ( p->numbytes >=0 && (sch->elements>0 || neh->elements >0) ) {
  627         if (sch->elements > 0) { /* have some eligible pkts to send out */
  628             struct dn_flow_queue *q = sch->p[0].object ;
  629             struct mbuf *pkt = q->head;
  630             struct dn_flow_set *fs = q->fs;
  631             u_int64_t len = pkt->m_pkthdr.len;
  632             int len_scaled = p->bandwidth ? len*8*hz : 0 ;
  633 
  634             heap_extract(sch, NULL); /* remove queue from heap */
  635             p->numbytes -= len_scaled ;
  636             move_pkt(pkt, q, p, len);
  637 
  638             p->V += (len<<MY_M) / p->sum ; /* update V */
  639             q->S = q->F ; /* update start time */
  640             if (q->len == 0) { /* Flow not backlogged any more */
  641                 fs->backlogged-- ;
  642                 heap_insert(&(p->idle_heap), q->F, q);
  643             } else { /* still backlogged */
  644                 /*
  645                  * update F and position in backlogged queue, then
  646                  * put flow in not_eligible_heap (we will fix this later).
  647                  */
  648                 len = (q->head)->m_pkthdr.len;
  649                 q->F += (len<<MY_M)/(u_int64_t) fs->weight ;
  650                 if (DN_KEY_LEQ(q->S, p->V))
  651                     heap_insert(neh, q->S, q);
  652                 else
  653                     heap_insert(sch, q->F, q);
  654             }
  655         }
  656         /*
  657          * now compute V = max(V, min(S_i)). Remember that all elements in sch
  658          * have by definition S_i <= V so if sch is not empty, V is surely
  659          * the max and we must not update it. Conversely, if sch is empty
  660          * we only need to look at neh.
  661          */
  662         if (sch->elements == 0 && neh->elements > 0)
  663             p->V = MAX64 ( p->V, neh->p[0].key );
  664         /* move from neh to sch any packets that have become eligible */
  665         while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V) ) {
  666             struct dn_flow_queue *q = neh->p[0].object ;
  667             heap_extract(neh, NULL);
  668             heap_insert(sch, q->F, q);
  669         }
  670 
  671         if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
  672             p->numbytes = -1 ; /* mark not ready for I/O */
  673             break ;
  674         }
  675     }
  676     if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0
  677             && p->idle_heap.elements > 0) {
  678         /*
  679          * no traffic and no events scheduled. We can get rid of idle-heap.
  680          */
  681         int i ;
  682 
  683         for (i = 0 ; i < p->idle_heap.elements ; i++) {
  684             struct dn_flow_queue *q = p->idle_heap.p[i].object ;
  685 
  686             q->F = 0 ;
  687             q->S = q->F + 1 ;
  688         }
  689         p->sum = 0 ;
  690         p->V = 0 ;
  691         p->idle_heap.elements = 0 ;
  692     }
  693     /*
  694      * If we are getting clocks from dummynet (not a real interface) and
  695      * If we are under credit, schedule the next ready event.
  696      * Also fix the delivery time of the last packet.
  697      */
  698     if (p->if_name[0]==0 && p->numbytes < 0) { /* this implies bandwidth >0 */
  699         dn_key t=0 ; /* number of ticks i have to wait */
  700 
  701         if (p->bandwidth > 0)
  702             t = ( p->bandwidth -1 - p->numbytes) / p->bandwidth ;
  703         dn_tag_get(p->tail)->output_time += t ;
  704         p->sched_time = curr_time ;
  705         heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
  706         /* XXX should check errors on heap_insert, and drain the whole
  707          * queue on error hoping next time we are luckier.
  708          */
  709     }
  710     /*
  711      * If the delay line was empty call transmit_event() now.
  712      * Otherwise, the scheduler will take care of it.
  713      */
  714     if (p_was_empty)
  715         transmit_event(p, head, tail);
  716 }
  717 
  718 /*
   719  * This is called one tick after the previous run, and is used to
   720  * schedule the next run.
  721  */
  722 static void
  723 dummynet(void * __unused unused)
  724 {
  725         taskqueue_enqueue(dn_tq, &dn_task);
  726 }
  727 
  728 /*
  729  * The main dummynet processing function.
  730  */
  731 static void
  732 dummynet_task(void *context, int pending)
  733 {
  734 
  735         struct mbuf *head = NULL, *tail = NULL;
  736         struct dn_pipe *pipe;
  737         struct dn_heap *heaps[3];
  738         struct dn_heap *h;
  739         void *p;        /* generic parameter to handler */
  740         int i;
  741 
  742         NET_LOCK_GIANT();
  743         DUMMYNET_LOCK();
  744 
  745         heaps[0] = &ready_heap;                 /* fixed-rate queues */
  746         heaps[1] = &wfq_ready_heap;             /* wfq queues */
  747         heaps[2] = &extract_heap;               /* delay line */
  748 
  749         /* Update number of lost(coalesced) ticks. */
  750         tick_lost += pending - 1;
  751  
  752         getmicrouptime(&t);
  753         /* Last tick duration (usec). */
  754         tick_last = (t.tv_sec - prev_t.tv_sec) * 1000000 +
  755             (t.tv_usec - prev_t.tv_usec);
  756         /* Last tick vs standard tick difference (usec). */
  757         tick_delta = (tick_last * hz - 1000000) / hz;
  758         /* Accumulated tick difference (usec). */
  759         tick_delta_sum += tick_delta;
  760  
  761         prev_t = t;
  762  
  763         /*
  764          * Adjust curr_time if accumulated tick difference greater than
  765          * 'standard' tick. Since curr_time should be monotonically increasing,
  766          * we do positive adjustment as required and throttle curr_time in
  767          * case of negative adjustment.
  768          */
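               /*
                * Worked example (illustrative): with hz = 1000 the standard
                * tick is tick = 1000000/hz = 1000 usec. A tick measured at
                * tick_last = 1100 usec gives tick_delta = 1100 - 1000 = 100
                * usec; after ten such ticks tick_delta_sum reaches one full
                * tick and curr_time is advanced by one extra tick below.
                */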
  769         curr_time++;
  770         if (tick_delta_sum - tick >= 0) {
  771                 int diff = tick_delta_sum / tick;
  772  
  773                 curr_time += diff;
  774                 tick_diff += diff;
  775                 tick_delta_sum %= tick;
  776                 tick_adjustment++;
  777         } else if (tick_delta_sum + tick <= 0) {
  778                 curr_time--;
  779                 tick_diff--;
  780                 tick_delta_sum += tick;
  781                 tick_adjustment++;
  782         }
  783 
  784         for (i = 0; i < 3; i++) {
  785                 h = heaps[i];
  786                 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
  787                         if (h->p[0].key > curr_time)
  788                                 printf("dummynet: warning, "
  789                                     "heap %d is %d ticks late\n",
  790                                     i, (int)(curr_time - h->p[0].key));
  791                         /* store a copy before heap_extract */
  792                         p = h->p[0].object;
  793                         /* need to extract before processing */
  794                         heap_extract(h, NULL);
  795                         if (i == 0)
  796                                 ready_event(p, &head, &tail);
  797                         else if (i == 1) {
  798                                 struct dn_pipe *pipe = p;
  799                                 if (pipe->if_name[0] != '\0')
  800                                         printf("dummynet: bad ready_event_wfq "
  801                                             "for pipe %s\n", pipe->if_name);
  802                                 else
  803                                         ready_event_wfq(p, &head, &tail);
  804                         } else
  805                                 transmit_event(p, &head, &tail);
  806                 }
  807         }
  808 
  809         /* Sweep pipes trying to expire idle flow_queues. */
  810         for (i = 0; i < HASHSIZE; i++)
  811                 SLIST_FOREACH(pipe, &pipehash[i], next)
  812                         if (pipe->idle_heap.elements > 0 &&
  813                             DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
  814                                 struct dn_flow_queue *q =
  815                                     pipe->idle_heap.p[0].object;
  816 
  817                                 heap_extract(&(pipe->idle_heap), NULL);
  818                                 /* Mark timestamp as invalid. */
  819                                 q->S = q->F + 1;
  820                                 pipe->sum -= q->fs->weight;
  821                         }
  822 
  823         DUMMYNET_UNLOCK();
  824 
  825         if (head != NULL)
  826                 dummynet_send(head);
  827 
  828         callout_reset(&dn_timeout, 1, dummynet, NULL);
  829 
  830         NET_UNLOCK_GIANT();
  831 }
  832 
  833 static void
  834 dummynet_send(struct mbuf *m)
  835 {
  836         struct dn_pkt_tag *pkt;
  837         struct mbuf *n;
  838         struct ip *ip;
  839 
  840         for (; m != NULL; m = n) {
  841                 n = m->m_nextpkt;
  842                 m->m_nextpkt = NULL;
  843                 pkt = dn_tag_get(m);
  844                 switch (pkt->dn_dir) {
  845                 case DN_TO_IP_OUT:
  846                         ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
  847                         break ;
  848                   case DN_TO_IP_IN :
  849                         ip = mtod(m, struct ip *);
  850                         ip->ip_len = htons(ip->ip_len);
  851                         ip->ip_off = htons(ip->ip_off);
  852                         netisr_dispatch(NETISR_IP, m);
  853                         break;
  854 #ifdef INET6
  855                 case DN_TO_IP6_IN:
  856                         netisr_dispatch(NETISR_IPV6, m);
  857                         break;
  858 
  859                 case DN_TO_IP6_OUT:
  860                         ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
  861                         break;
  862 #endif
  863                 case DN_TO_IFB_FWD:
  864                         if (bridge_dn_p != NULL)
  865                                 ((*bridge_dn_p)(m, pkt->ifp));
  866                         else
  867                                 printf("dummynet: if_bridge not loaded\n");
  868 
  869                         break;
  870                 case DN_TO_BDG_FWD :
  871                         /*
  872                          * The bridge requires/assumes the Ethernet header is
  873                          * contiguous in the first mbuf header.  Ensure this
  874                          * is true.
  875                          */
  876                         if (BDG_LOADED) {
  877                                 if (m->m_len < ETHER_HDR_LEN &&
  878                                     (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
  879                                         printf("dummynet/bridge: pullup fail, "
  880                                             "dropping pkt\n");
  881                                         break;
  882                                 }
  883                                 m = bdg_forward_ptr(m, pkt->ifp);
  884                         } else {
  885                                 /*
  886                                  * somebody unloaded the bridge module.
  887                                  * Drop pkt
  888                                  */
  889                                 /* XXX rate limit */
  890                                 printf("dummynet: dropping bridged packet "
  891                                     "trapped in pipe\n");
  892                         }
  893                         if (m)
  894                                 m_freem(m);
  895                         break;
  896                 case DN_TO_ETH_DEMUX:
  897                         /*
  898                          * The Ethernet code assumes the Ethernet header is
  899                          * contiguous in the first mbuf header.
  900                          * Insure this is true.
  901                          */
  902                         if (m->m_len < ETHER_HDR_LEN &&
  903                             (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
  904                                 printf("dummynet/ether: pullup failed, "
  905                                     "dropping packet\n");
  906                                 break;
  907                         }
  908                         ether_demux(m->m_pkthdr.rcvif, m);
  909                         break;
  910                 case DN_TO_ETH_OUT:
  911                         ether_output_frame(pkt->ifp, m);
  912                         break;
  913                 default:
  914                         printf("dummynet: bad switch %d!\n", pkt->dn_dir);
  915                         m_freem(m);
  916                         break;
  917                 }
  918         }
  919 }
  920 
  921 /*
  922  * Unconditionally expire empty queues in case of shortage.
  923  * Returns the number of queues freed.
  924  */
  925 static int
  926 expire_queues(struct dn_flow_set *fs)
  927 {
  928     struct dn_flow_queue *q, *prev ;
  929     int i, initial_elements = fs->rq_elements ;
  930 
  931     if (fs->last_expired == time_second)
  932         return 0 ;
  933     fs->last_expired = time_second ;
  934     for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
  935         for (prev=NULL, q = fs->rq[i] ; q != NULL ; )
  936             if (q->head != NULL || q->S != q->F+1) {
  937                 prev = q ;
  938                 q = q->next ;
  939             } else { /* entry is idle, expire it */
  940                 struct dn_flow_queue *old_q = q ;
  941 
  942                 if (prev != NULL)
  943                     prev->next = q = q->next ;
  944                 else
  945                     fs->rq[i] = q = q->next ;
  946                 fs->rq_elements-- ;
  947                 free(old_q, M_DUMMYNET);
  948             }
  949     return initial_elements - fs->rq_elements ;
  950 }
  951 
  952 /*
  953  * If room, create a new queue and put at head of slot i;
  954  * otherwise, create or use the default queue.
  955  */
  956 static struct dn_flow_queue *
  957 create_queue(struct dn_flow_set *fs, int i)
  958 {
  959     struct dn_flow_queue *q ;
  960 
  961     if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
  962             expire_queues(fs) == 0) {
  963         /*
  964          * No way to get room, use or create overflow queue.
  965          */
  966         i = fs->rq_size ;
  967         if ( fs->rq[i] != NULL )
  968             return fs->rq[i] ;
  969     }
  970     q = malloc(sizeof(*q), M_DUMMYNET, M_NOWAIT | M_ZERO);
  971     if (q == NULL) {
  972         printf("dummynet: sorry, cannot allocate queue for new flow\n");
  973         return NULL ;
  974     }
  975     q->fs = fs ;
  976     q->hash_slot = i ;
  977     q->next = fs->rq[i] ;
  978     q->S = q->F + 1;   /* hack - mark timestamp as invalid */
  979     fs->rq[i] = q ;
  980     fs->rq_elements++ ;
  981     return q ;
  982 }
  983 
  984 /*
  985  * Given a flow_set and a pkt in last_pkt, find a matching queue
  986  * after appropriate masking. The queue is moved to front
  987  * so that further searches take less time.
  988  */
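       /*
        * Example of the masking step (illustrative): a flow mask whose
        * dst_ip is 0xffffff00 and whose other fields are zero collapses all
        * traffic towards the same /24 into a single queue, because both the
        * hash and the match loop below only ever see the masked flow id.
        */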
  989 static struct dn_flow_queue *
  990 find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
  991 {
  992     int i = 0 ; /* we need i and q for new allocations */
  993     struct dn_flow_queue *q, *prev;
  994     int is_v6 = IS_IP6_FLOW_ID(id);
  995 
  996     if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) )
  997         q = fs->rq[0] ;
  998     else {
  999         /* first, do the masking, then hash */
 1000         id->dst_port &= fs->flow_mask.dst_port ;
 1001         id->src_port &= fs->flow_mask.src_port ;
 1002         id->proto &= fs->flow_mask.proto ;
 1003         id->flags = 0 ; /* we don't care about this one */
 1004         if (is_v6) {
 1005             APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
 1006             APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
 1007             id->flow_id6 &= fs->flow_mask.flow_id6;
 1008 
 1009             i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff)^
 1010                 ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff)^
 1011                 ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff)^
 1012                 ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff)^
 1013 
 1014                 ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff)^
 1015                 ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff)^
 1016                 ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff)^
 1017                 ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff)^
 1018 
 1019                 ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff)^
 1020                 ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff)^
 1021                 ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^
 1022                 ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^
 1023 
 1024                 ((id->src_ip6.__u6_addr.__u6_addr32[0] << 16) & 0xffff)^
 1025                 ((id->src_ip6.__u6_addr.__u6_addr32[1] << 16) & 0xffff)^
 1026                 ((id->src_ip6.__u6_addr.__u6_addr32[2] << 16) & 0xffff)^
 1027                 ((id->src_ip6.__u6_addr.__u6_addr32[3] << 16) & 0xffff)^
 1028 
 1029                 (id->dst_port << 1) ^ (id->src_port) ^
 1030                 (id->proto ) ^
 1031                 (id->flow_id6);
 1032         } else {
 1033             id->dst_ip &= fs->flow_mask.dst_ip ;
 1034             id->src_ip &= fs->flow_mask.src_ip ;
 1035 
 1036             i = ( (id->dst_ip) & 0xffff ) ^
 1037                 ( (id->dst_ip >> 15) & 0xffff ) ^
 1038                 ( (id->src_ip << 1) & 0xffff ) ^
 1039                 ( (id->src_ip >> 16 ) & 0xffff ) ^
 1040                 (id->dst_port << 1) ^ (id->src_port) ^
 1041                 (id->proto );
 1042         }
 1043         i = i % fs->rq_size ;
 1044         /* finally, scan the current list for a match */
 1045         searches++ ;
 1046         for (prev=NULL, q = fs->rq[i] ; q ; ) {
 1047             search_steps++;
 1048             if (is_v6 &&
 1049                     IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) &&  
 1050                     IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) &&  
 1051                     id->dst_port == q->id.dst_port &&
 1052                     id->src_port == q->id.src_port &&
 1053                     id->proto == q->id.proto &&
 1054                     id->flags == q->id.flags &&
 1055                     id->flow_id6 == q->id.flow_id6)
 1056                 break ; /* found */
 1057 
 1058             if (!is_v6 && id->dst_ip == q->id.dst_ip &&
 1059                     id->src_ip == q->id.src_ip &&
 1060                     id->dst_port == q->id.dst_port &&
 1061                     id->src_port == q->id.src_port &&
 1062                     id->proto == q->id.proto &&
 1063                     id->flags == q->id.flags)
 1064                 break ; /* found */
 1065 
 1066             /* No match. Check if we can expire the entry */
 1067             if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
 1068                 /* entry is idle and not in any heap, expire it */
 1069                 struct dn_flow_queue *old_q = q ;
 1070 
 1071                 if (prev != NULL)
 1072                     prev->next = q = q->next ;
 1073                 else
 1074                     fs->rq[i] = q = q->next ;
 1075                 fs->rq_elements-- ;
 1076                 free(old_q, M_DUMMYNET);
 1077                 continue ;
 1078             }
 1079             prev = q ;
 1080             q = q->next ;
 1081         }
 1082         if (q && prev != NULL) { /* found and not in front */
 1083             prev->next = q->next ;
 1084             q->next = fs->rq[i] ;
 1085             fs->rq[i] = q ;
 1086         }
 1087     }
 1088     if (q == NULL) { /* no match, need to allocate a new entry */
 1089         q = create_queue(fs, i);
 1090         if (q != NULL)
  1091             q->id = *id ;
 1092     }
 1093     return q ;
 1094 }
 1095 
 1096 static int
 1097 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
 1098 {
 1099         /*
 1100          * RED algorithm
 1101          *
 1102          * RED calculates the average queue size (avg) using a low-pass filter
 1103          * with an exponential weighted (w_q) moving average:
 1104          *      avg  <-  (1-w_q) * avg + w_q * q_size
  1105          * where q_size is the queue length (measured in bytes or packets).
 1106          *
 1107          * If q_size == 0, we compute the idle time for the link, and set
 1108          *      avg = (1 - w_q)^(idle/s)
 1109          * where s is the time needed for transmitting a medium-sized packet.
 1110          *
 1111          * Now, if avg < min_th the packet is enqueued.
 1112          * If avg > max_th the packet is dropped. Otherwise, the packet is
  1113          * dropped with a probability P that is a function of avg.
 1114          */
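               /*
                * Worked example (illustrative): with min_th = 10, max_th = 30
                * (in packets) and max_p = 0.1, c_1 = max_p/(max_th - min_th)
                * and c_2 = max_p*min_th/(max_th - min_th), so an average
                * queue of avg = 20 gives p_b = 0.1*(20 - 10)/20 = 0.05,
                * i.e. roughly one drop per 20 packets before the q->count
                * correction applied further down.
                */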
 1115 
 1116         int64_t p_b = 0;
 1117 
 1118         /* Queue in bytes or packets? */
 1119         u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ?
 1120             q->len_bytes : q->len;
 1121 
 1122         DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));
 1123 
 1124         /* Average queue size estimation. */
 1125         if (q_size != 0) {
 1126                 /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
 1127                 int diff = SCALE(q_size) - q->avg;
 1128                 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
 1129 
 1130                 q->avg += (int)v;
 1131         } else {
 1132                 /*
 1133                  * Queue is empty, find for how long the queue has been
 1134                  * empty and use a lookup table for computing
  1135          * (1 - w_q)^(idle_time/s) where s is the time to send a
 1136                  * (small) packet.
 1137                  * XXX check wraps...
 1138                  */
 1139                 if (q->avg) {
 1140                         u_int t = (curr_time - q->q_time) / fs->lookup_step;
 1141 
 1142                         q->avg = (t < fs->lookup_depth) ?
 1143                             SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
 1144                 }
 1145         }
 1146         DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
 1147 
 1148         /* Should i drop? */
 1149         if (q->avg < fs->min_th) {
 1150                 q->count = -1;
 1151                 return (0);     /* accept packet */
 1152         }
 1153         if (q->avg >= fs->max_th) {     /* average queue >=  max threshold */
 1154                 if (fs->flags_fs & DN_IS_GENTLE_RED) {
 1155                         /*
 1156                          * According to Gentle-RED, if avg is greater than
 1157                          * max_th the packet is dropped with a probability
 1158                          *       p_b = c_3 * avg - c_4
 1159                          * where c_3 = (1 - max_p) / max_th
 1160                          *       c_4 = 1 - 2 * max_p
 1161                          */
 1162                         p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
 1163                             fs->c_4;
 1164                 } else {
 1165                         q->count = -1;
 1166                         DPRINTF(("dummynet: - drop"));
 1167                         return (1);
 1168                 }
 1169         } else if (q->avg > fs->min_th) {
 1170                 /*
 1171                  * We compute p_b using the linear dropping function
 1172                  *       p_b = c_1 * avg - c_2
 1173                  * where c_1 = max_p / (max_th - min_th)
 1174                  *       c_2 = max_p * min_th / (max_th - min_th)
 1175                  */
 1176                 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
 1177         }
 1178 
 1179         if (fs->flags_fs & DN_QSIZE_IS_BYTES)
 1180                 p_b = (p_b * len) / fs->max_pkt_size;
 1181         if (++q->count == 0)
 1182                 q->random = random() & 0xffff;
 1183         else {
 1184                 /*
 1185                  * q->count counts packets arrived since last drop, so a greater
 1186                  * value of q->count means a greater packet drop probability.
 1187                  */
 1188                 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
 1189                         q->count = 0;
 1190                         DPRINTF(("dummynet: - red drop"));
 1191                         /* After a drop we calculate a new random value. */
 1192                         q->random = random() & 0xffff;
 1193                         return (1);     /* drop */
 1194                 }
 1195         }
 1196         /* End of RED algorithm. */
 1197 
 1198         return (0);     /* accept */
 1199 }
 1200 
 1201 static __inline struct dn_flow_set *
 1202 locate_flowset(int fs_nr)
 1203 {
 1204         struct dn_flow_set *fs;
 1205 
 1206         SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
 1207                 if (fs->fs_nr == fs_nr)
 1208                         return (fs);
 1209 
 1210         return (NULL);
 1211 }
 1212 
 1213 static __inline struct dn_pipe *
 1214 locate_pipe(int pipe_nr)
 1215 {
 1216         struct dn_pipe *pipe;
 1217 
 1218         SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next)
 1219                 if (pipe->pipe_nr == pipe_nr)
 1220                         return (pipe);
 1221 
 1222         return (NULL);
 1223 }
 1224 
 1225 /*
 1226  * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 1227  * depending on whether WF2Q or fixed bw is used.
 1228  *
 1229  * pipe_nr      pipe or queue the packet is destined for.
 1230  * dir          where shall we send the packet after dummynet.
 1231  * m            the mbuf with the packet
 1232  * ifp          the 'ifp' parameter from the caller.
 1233  *              NULL in ip_input, destination interface in ip_output,
 1234  *              real_dst in bdg_forward
 1235  * rule         matching rule, in case of multiple passes
 1236  *
  1237  */
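       /*
        * For context, a typical ipfw setup that steers packets into this
        * hook (illustrative):
        *
        *      ipfw add pipe 1 ip from any to any out
        *      ipfw pipe 1 config bw 1Mbit/s delay 50ms
        *
        * The matching rule's O_PIPE/O_QUEUE opcode leaves the pipe or queue
        * number in fwa->cookie, which locate_pipe()/locate_flowset() below
        * use to find the right object.
        */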
 1238 static int
 1239 dummynet_io(struct mbuf *m, int dir, struct ip_fw_args *fwa)
 1240 {
 1241     struct mbuf *head = NULL, *tail = NULL;
 1242     struct dn_pkt_tag *pkt;
 1243     struct m_tag *mtag;
 1244     struct dn_flow_set *fs = NULL;
 1245     struct dn_pipe *pipe ;
 1246     u_int64_t len = m->m_pkthdr.len ;
 1247     struct dn_flow_queue *q = NULL ;
 1248     int is_pipe;
 1249     ipfw_insn *cmd = ACTION_PTR(fwa->rule);
 1250 
 1251     KASSERT(m->m_nextpkt == NULL,
 1252         ("dummynet_io: mbuf queue passed to dummynet"));
 1253 
 1254     if (cmd->opcode == O_LOG)
 1255         cmd += F_LEN(cmd);
 1256     if (cmd->opcode == O_ALTQ)
 1257         cmd += F_LEN(cmd);
 1258     if (cmd->opcode == O_TAG)
 1259         cmd += F_LEN(cmd);
 1260     is_pipe = (cmd->opcode == O_PIPE);
 1261 
 1262     DUMMYNET_LOCK();
 1263     /*
 1264      * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
 1265      *
 1266      * XXXGL: probably the pipe->fs and fs->pipe logic here
 1267      * below can be simplified.
 1268      */
 1269     if (is_pipe) {
 1270         pipe = locate_pipe(fwa->cookie);
 1271         if (pipe != NULL)
 1272                 fs = &(pipe->fs);
 1273     } else
 1274         fs = locate_flowset(fwa->cookie);
 1275 
 1276     if (fs == NULL)
 1277         goto dropit;    /* This queue/pipe does not exist! */
 1278     pipe = fs->pipe;
 1279     if (pipe == NULL) { /* Must be a queue, try find a matching pipe. */
 1280         pipe = locate_pipe(fs->parent_nr);
 1281         if (pipe != NULL)
 1282             fs->pipe = pipe;
 1283         else {
 1284             printf("dummynet: no pipe %d for queue %d, drop pkt\n",
 1285                 fs->parent_nr, fs->fs_nr);
 1286             goto dropit ;
 1287         }
 1288     }
 1289     q = find_queue(fs, &(fwa->f_id));
 1290     if ( q == NULL )
 1291         goto dropit ;           /* cannot allocate queue                */
 1292     /*
 1293      * update statistics, then check reasons to drop pkt
 1294      */
 1295     q->tot_bytes += len ;
 1296     q->tot_pkts++ ;
 1297     if ( fs->plr && random() < fs->plr )
 1298         goto dropit ;           /* random pkt drop                      */
 1299     if ( fs->flags_fs & DN_QSIZE_IS_BYTES) {
 1300         if (q->len_bytes > fs->qsize)
 1301             goto dropit ;       /* queue size overflow                  */
 1302     } else {
 1303         if (q->len >= fs->qsize)
 1304             goto dropit ;       /* queue count overflow                 */
 1305     }
 1306     if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) )
 1307         goto dropit ;
 1308 
  1309     /* XXX expensive to zero, see if we can remove it */
 1310     mtag = m_tag_get(PACKET_TAG_DUMMYNET,
 1311                 sizeof(struct dn_pkt_tag), M_NOWAIT|M_ZERO);
 1312     if ( mtag == NULL )
 1313         goto dropit ;           /* cannot allocate packet header        */
 1314     m_tag_prepend(m, mtag);     /* attach to mbuf chain */
 1315 
 1316     pkt = (struct dn_pkt_tag *)(mtag+1);
 1317     /* ok, i can handle the pkt now... */
 1318     /* build and enqueue packet + parameters */
 1319     pkt->rule = fwa->rule ;
 1320     pkt->dn_dir = dir ;
 1321 
 1322     pkt->ifp = fwa->oif;
 1323 
 1324     if (q->head == NULL)
 1325         q->head = m;
 1326     else
 1327         q->tail->m_nextpkt = m;
 1328     q->tail = m;
 1329     q->len++;
 1330     q->len_bytes += len ;
 1331 
 1332     if ( q->head != m )         /* flow was not idle, we are done */
 1333         goto done;
 1334     /*
 1335      * If we reach this point the flow was previously idle, so we need
 1336      * to schedule it. This involves different actions for fixed-rate or
 1337      * WF2Q queues.
 1338      */
 1339     if (is_pipe) {
 1340         /*
 1341          * Fixed-rate queue: just insert into the ready_heap.
 1342          */
 1343         dn_key t = 0 ;
 1344         if (pipe->bandwidth)
 1345             t = SET_TICKS(m, q, pipe);
 1346         q->sched_time = curr_time ;
 1347         if (t == 0)     /* must process it now */
 1348             ready_event(q, &head, &tail);
 1349         else
 1350             heap_insert(&ready_heap, curr_time + t , q );
 1351     } else {
 1352         /*
 1353          * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
 1354          * set S to the virtual time V for the controlling pipe, and update
 1355          * the sum of weights for the pipe; otherwise, remove flow from
 1356          * idle_heap and set S to max(F,V).
 1357          * Second, compute finish time F = S + len/weight.
 1358          * Third, if pipe was idle, update V=max(S, V).
 1359          * Fourth, count one more backlogged flow.
 1360          */
 1361         if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
 1362             q->S = pipe->V ;
 1363             pipe->sum += fs->weight ; /* add weight of new queue */
 1364         } else {
 1365             heap_extract(&(pipe->idle_heap), q);
 1366             q->S = MAX64(q->F, pipe->V ) ;
 1367         }
 1368         q->F = q->S + ( len<<MY_M )/(u_int64_t) fs->weight;
 1369 
 1370         if (pipe->not_eligible_heap.elements == 0 &&
 1371                 pipe->scheduler_heap.elements == 0)
 1372             pipe->V = MAX64 ( q->S, pipe->V );
 1373         fs->backlogged++ ;
 1374         /*
 1375          * Look at eligibility. A flow is not eligible if S>V (when
 1376          * this happens, it means that there is some other flow already
 1377          * scheduled for the same pipe, so the scheduler_heap cannot be
 1378          * empty). If the flow is not eligible we just store it in the
 1379          * not_eligible_heap. Otherwise, we store in the scheduler_heap
 1380          * and possibly invoke ready_event_wfq() right now if there is
 1381          * leftover credit.
 1382          * Note that for all flows in scheduler_heap (SCH), S_i <= V,
 1383          * and for all flows in not_eligible_heap (NEH), S_i > V.
 1384          * So when we need to compute max( V, min(S_i) ) for all i in SCH+NEH,
 1385          * we only need to look into NEH.
 1386          */
 1387         if (DN_KEY_GT(q->S, pipe->V) ) { /* not eligible */
 1388             if (pipe->scheduler_heap.elements == 0)
 1389                 printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
 1390             heap_insert(&(pipe->not_eligible_heap), q->S, q);
 1391         } else {
 1392             heap_insert(&(pipe->scheduler_heap), q->F, q);
 1393             if (pipe->numbytes >= 0) { /* pipe is idle */
 1394                 if (pipe->scheduler_heap.elements != 1)
 1395                     printf("dummynet: OUCH! pipe should have been idle!\n");
 1396                 DPRINTF(("dummynet: waking up pipe %d at %d\n",
 1397                         pipe->pipe_nr, (int)(q->F >> MY_M)));
 1398                 pipe->sched_time = curr_time ;
 1399                 ready_event_wfq(pipe, &head, &tail);
 1400             }
 1401         }
 1402     }
 1403 done:
 1404     DUMMYNET_UNLOCK();
 1405     if (head != NULL)
 1406         dummynet_send(head);
 1407     return 0;
 1408 
 1409 dropit:
 1410     if (q)
 1411         q->drops++ ;
 1412     DUMMYNET_UNLOCK();
 1413     m_freem(m);
 1414     return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
 1415 }
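
/*
 * [Editorial sketch, not part of the original file.] A minimal userland
 * model of the WF2Q+ bookkeeping performed above when an idle flow
 * becomes backlogged: S is taken from the pipe's virtual time V (or
 * max(F, V) if the flow was merely parked on the idle_heap), F advances
 * by len/weight in fixed point, and V may be pulled forward when the
 * pipe itself was idle. The MY_M shift and the "S > F means invalid"
 * encoding mirror the kernel code; everything else is a simplified
 * stand-in for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MY_M 16                         /* fixed-point shift, as in ip_dummynet.h */

struct wf2q_flow {
        uint64_t S, F;                  /* start/finish virtual times */
        int weight;
};

static void
wf2q_wakeup(struct wf2q_flow *q, uint64_t *V, uint64_t len, int pipe_idle)
{
        if (q->S > q->F)                /* invalid timestamps: flow was idle */
                q->S = *V;
        else                            /* was on idle_heap: S = max(F, V) */
                q->S = (q->F > *V) ? q->F : *V;
        q->F = q->S + ((len << MY_M) / (uint64_t)q->weight);
        if (pipe_idle && q->S > *V)     /* kernel: both heaps empty */
                *V = q->S;
}

int
main(void)
{
        struct wf2q_flow q = { .S = 1, .F = 0, .weight = 10 };
        uint64_t V = 0;

        wf2q_wakeup(&q, &V, 1500, 1);   /* one 1500-byte packet, idle pipe */
        printf("S=%ju F=%ju V=%ju\n", (uintmax_t)q.S, (uintmax_t)q.F,
            (uintmax_t)V);
        return (0);
}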
 1416 
 1417 /*
 1418  * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT).
 1419  * Doing this would probably save us the initial bzero of dn_pkt.
 1420  */
 1421 #define DN_FREE_PKT(_m) do {                            \
 1422         m_freem(_m);                                    \
 1423 } while (0)
 1424 
 1425 /*
 1426  * Dispose of all packets and flow_queues on a flow_set.
 1427  * If all=1, also remove the RED lookup table and other storage,
 1428  * including the descriptor itself.
 1429  * For the flow_set embedded in a dn_pipe, the caller MUST also clean up the ready_heap.
 1430  */
 1431 static void
 1432 purge_flow_set(struct dn_flow_set *fs, int all)
 1433 {
 1434         struct dn_flow_queue *q, *qn;
 1435         int i;
 1436 
 1437         DUMMYNET_LOCK_ASSERT();
 1438 
 1439         for (i = 0; i <= fs->rq_size; i++) {
 1440                 for (q = fs->rq[i]; q != NULL; q = qn) {
 1441                         struct mbuf *m, *mnext;
 1442 
 1443                         mnext = q->head;
 1444                         while ((m = mnext) != NULL) {
 1445                                 mnext = m->m_nextpkt;
 1446                                 DN_FREE_PKT(m);
 1447                         }
 1448                         qn = q->next;
 1449                         free(q, M_DUMMYNET);
 1450                 }
 1451                 fs->rq[i] = NULL;
 1452         }
 1453 
 1454         fs->rq_elements = 0;
 1455         if (all) {
 1456                 /* RED - free lookup table. */
 1457                 if (fs->w_q_lookup != NULL)
 1458                         free(fs->w_q_lookup, M_DUMMYNET);
 1459                 if (fs->rq != NULL)
 1460                         free(fs->rq, M_DUMMYNET);
 1461                 /* If this fs is not part of a pipe, free it. */
 1462                 if (fs->pipe == NULL || fs != &(fs->pipe->fs))
 1463                         free(fs, M_DUMMYNET);
 1464         }
 1465 }
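
/*
 * [Editorial sketch, not part of the original file.] The
 * save-next-then-free pattern purge_flow_set() applies to both the
 * mbuf chains and the per-bucket queue lists, shown on a generic
 * singly linked list: the successor must be captured before free()
 * invalidates the node.
 */
#include <stdlib.h>

struct node { struct node *next; };

static void
free_list(struct node *head)
{
        struct node *n, *nnext;

        for (n = head; n != NULL; n = nnext) {
                nnext = n->next;        /* capture before free() */
                free(n);
        }
}

int
main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        if (a != NULL && b != NULL)
                a->next = b;
        free_list(a);
        return (0);
}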
 1466 
 1467 /*
 1468  * Dispose of all packets queued on a pipe (not a flow_set).
 1469  * Also free all resources associated with a pipe that is about
 1470  * to be deleted.
 1471  */
 1472 static void
 1473 purge_pipe(struct dn_pipe *pipe)
 1474 {
 1475     struct mbuf *m, *mnext;
 1476 
 1477     purge_flow_set( &(pipe->fs), 1 );
 1478 
 1479     mnext = pipe->head;
 1480     while ((m = mnext) != NULL) {
 1481         mnext = m->m_nextpkt;
 1482         DN_FREE_PKT(m);
 1483     }
 1484 
 1485     heap_free( &(pipe->scheduler_heap) );
 1486     heap_free( &(pipe->not_eligible_heap) );
 1487     heap_free( &(pipe->idle_heap) );
 1488 }
 1489 
 1490 /*
 1491  * Delete all pipes and heaps, returning their memory. Must also
 1492  * remove references from all ipfw rules to all pipes.
 1493  */
 1494 static void
 1495 dummynet_flush(void)
 1496 {
 1497         struct dn_pipe *pipe, *pipe1;
 1498         struct dn_flow_set *fs, *fs1;
 1499         int i;
 1500 
 1501         DUMMYNET_LOCK();
 1502         /* Free heaps so we don't have unwanted events. */
 1503         heap_free(&ready_heap);
 1504         heap_free(&wfq_ready_heap);
 1505         heap_free(&extract_heap);
 1506 
 1507         /*
 1508          * Now purge all queued pkts and delete all pipes.
 1509          *
 1510          * XXXGL: can we merge the for(;;) cycles into one or not?
 1511          */
 1512         for (i = 0; i < HASHSIZE; i++)
 1513                 SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
 1514                         SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
 1515                         purge_flow_set(fs, 1);
 1516                 }
 1517         for (i = 0; i < HASHSIZE; i++)
 1518                 SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
 1519                         SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
 1520                         purge_pipe(pipe);
 1521                         free(pipe, M_DUMMYNET);
 1522                 }
 1523         DUMMYNET_UNLOCK();
 1524 }
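
/*
 * [Editorial sketch, not part of the original file.] Why
 * dummynet_flush() iterates with SLIST_FOREACH_SAFE: removing and
 * freeing the current element invalidates its link, so the next
 * pointer must be cached up front. <sys/queue.h> works in userland
 * too, though the _SAFE variants are BSD additions that other libcs
 * may lack.
 */
#include <sys/queue.h>
#include <stdlib.h>

struct entry {
        SLIST_ENTRY(entry) next;
};
SLIST_HEAD(entryhead, entry);

static void
flush_all(struct entryhead *head)
{
        struct entry *e, *e1;

        SLIST_FOREACH_SAFE(e, head, next, e1) {
                SLIST_REMOVE(head, e, entry, next);
                free(e);
        }
}

int
main(void)
{
        struct entryhead head = SLIST_HEAD_INITIALIZER(head);
        struct entry *e = calloc(1, sizeof(*e));

        if (e != NULL)
                SLIST_INSERT_HEAD(&head, e, next);
        flush_all(&head);
        return (0);
}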
 1525 
 1526 extern struct ip_fw *ip_fw_default_rule ;
 1527 static void
 1528 dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
 1529 {
 1530     int i ;
 1531     struct dn_flow_queue *q ;
 1532     struct mbuf *m ;
 1533 
 1534     for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
 1535         for (q = fs->rq[i] ; q ; q = q->next )
 1536             for (m = q->head ; m ; m = m->m_nextpkt ) {
 1537                 struct dn_pkt_tag *pkt = dn_tag_get(m) ;
 1538                 if (pkt->rule == r)
 1539                     pkt->rule = ip_fw_default_rule ;
 1540             }
 1541 }
 1542 /*
 1543  * When a firewall rule is deleted, scan all queues and reset the rule
 1544  * pointer of matching packets to the default rule.
 1545  */
 1546 void
 1547 dn_rule_delete(void *r)
 1548 {
 1549     struct dn_pipe *pipe;
 1550     struct dn_flow_set *fs;
 1551     struct dn_pkt_tag *pkt;
 1552     struct mbuf *m;
 1553     int i;
 1554 
 1555     DUMMYNET_LOCK();
 1556     /*
 1557      * If the rule references a queue (dn_flow_set), then scan
 1558      * the flow set; otherwise scan pipes. We would only need to do
 1559      * one or the other, but doing both does no harm.
 1560      */
 1561     for (i = 0; i < HASHSIZE; i++)
 1562         SLIST_FOREACH(fs, &flowsethash[i], next)
 1563                 dn_rule_delete_fs(fs, r);
 1564 
 1565     for (i = 0; i < HASHSIZE; i++)
 1566         SLIST_FOREACH(pipe, &pipehash[i], next) {
 1567                 fs = &(pipe->fs);
 1568                 dn_rule_delete_fs(fs, r);
 1569                 for (m = pipe->head ; m ; m = m->m_nextpkt ) {
 1570                         pkt = dn_tag_get(m);
 1571                         if (pkt->rule == r)
 1572                                 pkt->rule = ip_fw_default_rule;
 1573                 }
 1574         }
 1575     DUMMYNET_UNLOCK();
 1576 }
 1577 
 1578 /*
 1579  * setup RED parameters
 1580  */
 1581 static int
 1582 config_red(struct dn_flow_set *p, struct dn_flow_set * x)
 1583 {
 1584         int i;
 1585 
 1586         x->w_q = p->w_q;
 1587         x->min_th = SCALE(p->min_th);
 1588         x->max_th = SCALE(p->max_th);
 1589         x->max_p = p->max_p;
 1590 
 1591         x->c_1 = p->max_p / (p->max_th - p->min_th);
 1592         x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
 1593 
 1594         if (x->flags_fs & DN_IS_GENTLE_RED) {
 1595                 x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
 1596                 x->c_4 = SCALE(1) - 2 * p->max_p;
 1597         }
 1598 
 1599         /* If the lookup table already exists, free it and create it anew. */
 1600         if (x->w_q_lookup) {
 1601                 free(x->w_q_lookup, M_DUMMYNET);
 1602                 x->w_q_lookup = NULL;
 1603         }
 1604         if (red_lookup_depth == 0) {
 1605                 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
 1606                     " must be > 0\n");
 1607                 free(x, M_DUMMYNET);
 1608                 return (EINVAL);
 1609         }
 1610         x->lookup_depth = red_lookup_depth;
 1611         x->w_q_lookup = (u_int *)malloc(x->lookup_depth * sizeof(int),
 1612             M_DUMMYNET, M_NOWAIT);
 1613         if (x->w_q_lookup == NULL) {
 1614                 printf("dummynet: sorry, cannot allocate red lookup table\n");
 1615                 free(x, M_DUMMYNET);
 1616                 return(ENOSPC);
 1617         }
 1618 
 1619         /* Fill the lookup table with (1 - w_q)^x */
 1620         x->lookup_step = p->lookup_step;
 1621         x->lookup_weight = p->lookup_weight;
 1622         x->w_q_lookup[0] = SCALE(1) - x->w_q;
 1623 
 1624         for (i = 1; i < x->lookup_depth; i++)
 1625                 x->w_q_lookup[i] =
 1626                     SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
 1627 
 1628         if (red_avg_pkt_size < 1)
 1629                 red_avg_pkt_size = 512;
 1630         x->avg_pkt_size = red_avg_pkt_size;
 1631         if (red_max_pkt_size < 1)
 1632                 red_max_pkt_size = 1500;
 1633         x->max_pkt_size = red_max_pkt_size;
 1634         return (0);
 1635 }
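
/*
 * [Editorial sketch, not part of the original file.] Filling the RED
 * lookup table with powers of (1 - w_q) in fixed point, as config_red()
 * does above. The 16-bit fractional scale and a lookup_step of 1 are
 * assumptions for illustration; the kernel takes lookup_step and
 * lookup_weight precomputed by the config program.
 */
#include <stdint.h>
#include <stdio.h>

#define SCALE_SHIFT 16
#define SCALE(x)        ((int64_t)(x) << SCALE_SHIFT)
#define SCALE_MUL(x, y) (((int64_t)(x) * (int64_t)(y)) >> SCALE_SHIFT)

int
main(void)
{
        int64_t w_q = SCALE(1) / 100;   /* w_q = 0.01 */
        int64_t tab[8];
        int i;

        tab[0] = SCALE(1) - w_q;        /* (1 - w_q)^1 */
        for (i = 1; i < 8; i++)         /* (1 - w_q)^(i + 1) */
                tab[i] = SCALE_MUL(tab[i - 1], tab[0]);
        for (i = 0; i < 8; i++)
                printf("tab[%d] = %.6f\n", i, (double)tab[i] / SCALE(1));
        return (0);
}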
 1636 
 1637 static int
 1638 alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
 1639 {
 1640     if (x->flags_fs & DN_HAVE_FLOW_MASK) {     /* allocate some slots */
 1641         int l = pfs->rq_size;
 1642 
 1643         if (l == 0)
 1644             l = dn_hash_size;
 1645         if (l < 4)
 1646             l = 4;
 1647         else if (l > DN_MAX_HASH_SIZE)
 1648             l = DN_MAX_HASH_SIZE;
 1649         x->rq_size = l;
 1650     } else                  /* one is enough for null mask */
 1651         x->rq_size = 1;
 1652     x->rq = malloc((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
 1653             M_DUMMYNET, M_NOWAIT | M_ZERO);
 1654     if (x->rq == NULL) {
 1655         printf("dummynet: sorry, cannot allocate queue\n");
 1656         return (ENOMEM);
 1657     }
 1658     x->rq_elements = 0;
 1659     return 0 ;
 1660 }
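
/*
 * [Editorial sketch, not part of the original file.] The bucket-count
 * clamping performed by alloc_hash(); note the kernel allocates one
 * slot past rq_size as an overflow bucket. DN_MAX_HASH_SIZE = 65536 is
 * an assumption taken from ip_dummynet.h.
 */
#include <stdio.h>

#define DN_MAX_HASH_SIZE 65536          /* assumed, see ip_dummynet.h */

static int
clamp_hash_size(int requested, int dflt)
{
        int l = (requested == 0) ? dflt : requested;

        if (l < 4)
                l = 4;
        else if (l > DN_MAX_HASH_SIZE)
                l = DN_MAX_HASH_SIZE;
        return (l);
}

int
main(void)
{
        printf("%d %d %d\n", clamp_hash_size(0, 64),
            clamp_hash_size(2, 64), clamp_hash_size(1 << 20, 64));
        return (0);
}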
 1661 
 1662 static void
 1663 set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
 1664 {
 1665         x->flags_fs = src->flags_fs;
 1666         x->qsize = src->qsize;
 1667         x->plr = src->plr;
 1668         x->flow_mask = src->flow_mask;
 1669         if (x->flags_fs & DN_QSIZE_IS_BYTES) {
 1670                 if (x->qsize > 1024 * 1024)
 1671                         x->qsize = 1024 * 1024;
 1672         } else {
 1673                 if (x->qsize == 0)
 1674                         x->qsize = 50;
 1675                 if (x->qsize > 100)
 1676                         x->qsize = 50;
 1677         }
 1678         /* Configuring RED. */
 1679         if (x->flags_fs & DN_IS_RED)
 1680                 config_red(src, x);     /* XXX should check errors */
 1681 }
 1682 
 1683 /*
 1684  * Setup pipe or queue parameters.
 1685  */
 1686 static int
 1687 config_pipe(struct dn_pipe *p)
 1688 {
 1689         struct dn_flow_set *pfs = &(p->fs);
 1690         struct dn_flow_queue *q;
 1691         int i, error;
 1692 
 1693         /*
 1694          * The config program passes parameters as follows:
 1695          * bw = bits/second (0 means no limits),
 1696          * delay = ms, must be translated into ticks.
 1697          * qsize = slots/bytes
 1698          */
 1699         p->delay = (p->delay * hz) / 1000;
 1700         /* We need either a pipe number or a flow_set number. */
 1701         if (p->pipe_nr == 0 && pfs->fs_nr == 0)
 1702                 return (EINVAL);
 1703         if (p->pipe_nr != 0 && pfs->fs_nr != 0)
 1704                 return (EINVAL);
 1705         if (p->pipe_nr != 0) {                  /* this is a pipe */
 1706                 struct dn_pipe *pipe;
 1707 
 1708                 DUMMYNET_LOCK();
 1709                 pipe = locate_pipe(p->pipe_nr); /* locate pipe */
 1710 
 1711                 if (pipe == NULL) {             /* new pipe */
 1712                         pipe = malloc(sizeof(struct dn_pipe), M_DUMMYNET,
 1713                             M_NOWAIT | M_ZERO);
 1714                         if (pipe == NULL) {
 1715                                 DUMMYNET_UNLOCK();
 1716                                 printf("dummynet: no memory for new pipe\n");
 1717                                 return (ENOMEM);
 1718                         }
 1719                         pipe->pipe_nr = p->pipe_nr;
 1720                         pipe->fs.pipe = pipe;
 1721                         /*
 1722                          * idle_heap is the only heap from which
 1723                          * we extract entries from the middle.
 1724                          */
 1725                         pipe->idle_heap.size = pipe->idle_heap.elements = 0;
 1726                         pipe->idle_heap.offset =
 1727                             OFFSET_OF(struct dn_flow_queue, heap_pos);
 1728                 } else
 1729                         /* Flush accumulated credit for all queues. */
 1730                         for (i = 0; i <= pipe->fs.rq_size; i++)
 1731                                 for (q = pipe->fs.rq[i]; q; q = q->next)
 1732                                         q->numbytes = 0;
 1733 
 1734                 pipe->bandwidth = p->bandwidth;
 1735                 pipe->numbytes = 0;             /* just in case... */
 1736                 bcopy(p->if_name, pipe->if_name, sizeof(p->if_name));
 1737                 pipe->ifp = NULL;               /* reset interface ptr */
 1738                 pipe->delay = p->delay;
 1739                 set_fs_parms(&(pipe->fs), pfs);
 1740 
 1741                 if (pipe->fs.rq == NULL) {      /* a new pipe */
 1742                         error = alloc_hash(&(pipe->fs), pfs);
 1743                         if (error) {
 1744                                 DUMMYNET_UNLOCK();
 1745                                 free(pipe, M_DUMMYNET);
 1746                                 return (error);
 1747                         }
 1748                         SLIST_INSERT_HEAD(&pipehash[HASH(pipe->pipe_nr)],
 1749                             pipe, next);
 1750                 }
 1751                 DUMMYNET_UNLOCK();
 1752         } else {                                /* config queue */
 1753                 struct dn_flow_set *fs;
 1754 
 1755                 DUMMYNET_LOCK();
 1756                 fs = locate_flowset(pfs->fs_nr); /* locate flow_set */
 1757 
 1758                 if (fs == NULL) {               /* new */
 1759                         if (pfs->parent_nr == 0) { /* need link to a pipe */
 1760                                 DUMMYNET_UNLOCK();
 1761                                 return (EINVAL);
 1762                         }
 1763                         fs = malloc(sizeof(struct dn_flow_set), M_DUMMYNET,
 1764                             M_NOWAIT | M_ZERO);
 1765                         if (fs == NULL) {
 1766                                 DUMMYNET_UNLOCK();
 1767                                 printf(
 1768                                     "dummynet: no memory for new flow_set\n");
 1769                                 return (ENOMEM);
 1770                         }
 1771                         fs->fs_nr = pfs->fs_nr;
 1772                         fs->parent_nr = pfs->parent_nr;
 1773                         fs->weight = pfs->weight;
 1774                         if (fs->weight == 0)
 1775                                 fs->weight = 1;
 1776                         else if (fs->weight > 100)
 1777                                 fs->weight = 100;
 1778                 } else {
 1779                         /*
 1780                          * Change parent pipe not allowed;
 1781                          * must delete and recreate.
 1782                          */
 1783                         if (pfs->parent_nr != 0 &&
 1784                             fs->parent_nr != pfs->parent_nr) {
 1785                                 DUMMYNET_UNLOCK();
 1786                                 return (EINVAL);
 1787                         }
 1788                 }
 1789 
 1790                 set_fs_parms(fs, pfs);
 1791 
 1792                 if (fs->rq == NULL) {           /* a new flow_set */
 1793                         error = alloc_hash(fs, pfs);
 1794                         if (error) {
 1795                                 DUMMYNET_UNLOCK();
 1796                                 free(fs, M_DUMMYNET);
 1797                                 return (error);
 1798                         }
 1799                         SLIST_INSERT_HEAD(&flowsethash[HASH(fs->fs_nr)],
 1800                             fs, next);
 1801                 }
 1802                 DUMMYNET_UNLOCK();
 1803         }
 1804         return (0);
 1805 }
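
/*
 * [Editorial sketch, not part of the original file.] The ms-to-ticks
 * conversion used by config_pipe(). With hz = 1000 (a common setting)
 * the mapping is one-to-one; with a coarser clock short delays
 * quantize and may round down to zero ticks.
 */
#include <stdio.h>

static int
ms_to_ticks(int ms, int hz)
{
        return ((ms * hz) / 1000);      /* same arithmetic as config_pipe() */
}

int
main(void)
{
        printf("%d\n", ms_to_ticks(15, 1000));  /* 15 ticks */
        printf("%d\n", ms_to_ticks(15, 100));   /* 1 tick: quantized */
        printf("%d\n", ms_to_ticks(4, 100));    /* 0 ticks: delay lost */
        return (0);
}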
 1806 
 1807 /*
 1808  * Helper function to remove from a heap queues which are linked to
 1809  * a flow_set about to be deleted.
 1810  */
 1811 static void
 1812 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
 1813 {
 1814     int i = 0, found = 0 ;
 1815     for (; i < h->elements ;)
 1816         if ( ((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
 1817             h->elements-- ;
 1818             h->p[i] = h->p[h->elements] ;
 1819             found++ ;
 1820         } else
 1821             i++ ;
 1822     if (found)
 1823         heapify(h);
 1824 }
 1825 
 1826 /*
 1827  * Helper function to remove a pipe from a heap (it can appear there at most once).
 1828  */
 1829 static void
 1830 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
 1831 {
 1832     if (h->elements > 0) {
 1833         int i = 0 ;
 1834         for (i=0; i < h->elements ; i++ ) {
 1835             if (h->p[i].object == p) { /* found it */
 1836                 h->elements-- ;
 1837                 h->p[i] = h->p[h->elements] ;
 1838                 heapify(h);
 1839                 break ;
 1840             }
 1841         }
 1842     }
 1843 }
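
/*
 * [Editorial sketch, not part of the original file.] The
 * remove-then-heapify idiom shared by fs_remove_from_heap() and
 * pipe_remove_from_heap(), shown on a plain min-heap of ints: each
 * hole is filled with the last element and the heap property is
 * rebuilt once at the end, rather than after every removal.
 */
#include <stdio.h>

static void
sift_down(int *h, int n, int i)
{
        for (;;) {
                int l = 2 * i + 1, r = l + 1, m = i, t;

                if (l < n && h[l] < h[m])
                        m = l;
                if (r < n && h[r] < h[m])
                        m = r;
                if (m == i)
                        break;
                t = h[i]; h[i] = h[m]; h[m] = t;
                i = m;
        }
}

/* Remove every element equal to val; returns the new element count. */
static int
heap_remove_all(int *h, int n, int val)
{
        int i = 0, found = 0;

        while (i < n) {
                if (h[i] == val) {
                        h[i] = h[--n];  /* fill hole with last element */
                        found++;
                } else
                        i++;
        }
        if (found)                      /* one O(n) rebuild, like heapify() */
                for (i = n / 2 - 1; i >= 0; i--)
                        sift_down(h, n, i);
        return (n);
}

int
main(void)
{
        int h[] = { 1, 3, 2, 7, 4, 2 }; /* a valid min-heap */
        int i, n = heap_remove_all(h, 6, 2);

        for (i = 0; i < n; i++)
                printf("%d ", h[i]);
        printf("\n");
        return (0);
}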
 1844 
 1845 /*
 1846  * Drain all queues. Called in case of a severe mbuf shortage.
 1847  */
 1848 void
 1849 dummynet_drain()
 1850 {
 1851     struct dn_flow_set *fs;
 1852     struct dn_pipe *pipe;
 1853     struct mbuf *m, *mnext;
 1854     int i;
 1855 
 1856     DUMMYNET_LOCK_ASSERT();
 1857 
 1858     heap_free(&ready_heap);
 1859     heap_free(&wfq_ready_heap);
 1860     heap_free(&extract_heap);
 1861     /* Purge the queued packets of every flow_set. */
 1862     for (i = 0; i < HASHSIZE; i++)
 1863         SLIST_FOREACH(fs, &flowsethash[i], next)
 1864                 purge_flow_set(fs, 0);
 1865 
 1866     for (i = 0; i < HASHSIZE; i++) {
 1867         SLIST_FOREACH(pipe, &pipehash[i], next) {
 1868                 purge_flow_set(&(pipe->fs), 0);
 1869 
 1870                 mnext = pipe->head;
 1871                 while ((m = mnext) != NULL) {
 1872                         mnext = m->m_nextpkt;
 1873                         DN_FREE_PKT(m);
 1874                 }
 1875                 pipe->head = pipe->tail = NULL;
 1876         }
 1877     }
 1878 }
 1879 
 1880 /*
 1881  * Fully delete a pipe or a queue, cleaning up associated info.
 1882  */
 1883 static int
 1884 delete_pipe(struct dn_pipe *p)
 1885 {
 1886     if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
 1887         return EINVAL ;
 1888     if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
 1889         return EINVAL ;
 1890     if (p->pipe_nr != 0) { /* this is an old-style pipe */
 1891         struct dn_pipe *pipe;
 1892         struct dn_flow_set *fs;
 1893         int i;
 1894 
 1895         DUMMYNET_LOCK();
 1896         pipe = locate_pipe(p->pipe_nr); /* locate pipe */
 1897 
 1898         if (pipe == NULL) {
 1899             DUMMYNET_UNLOCK();
 1900             return (ENOENT);    /* not found */
 1901         }
 1902 
 1903         /* Unlink from list of pipes. */
 1904         SLIST_REMOVE(&pipehash[HASH(pipe->pipe_nr)], pipe, dn_pipe, next);
 1905 
 1906         /* Remove all references to this pipe from flow_sets. */
 1907         for (i = 0; i < HASHSIZE; i++)
 1908             SLIST_FOREACH(fs, &flowsethash[i], next)
 1909                 if (fs->pipe == pipe) {
 1910                         printf("dummynet: ++ ref to pipe %d from fs %d\n",
 1911                             p->pipe_nr, fs->fs_nr);
 1912                         fs->pipe = NULL ;
 1913                         purge_flow_set(fs, 0);
 1914                 }
 1915         fs_remove_from_heap(&ready_heap, &(pipe->fs));
 1916         purge_pipe(pipe); /* remove all data associated to this pipe */
 1917         /* remove reference to here from extract_heap and wfq_ready_heap */
 1918         pipe_remove_from_heap(&extract_heap, pipe);
 1919         pipe_remove_from_heap(&wfq_ready_heap, pipe);
 1920         DUMMYNET_UNLOCK();
 1921 
 1922         free(pipe, M_DUMMYNET);
 1923     } else { /* this is a WF2Q queue (dn_flow_set) */
 1924         struct dn_flow_set *fs;
 1925 
 1926         DUMMYNET_LOCK();
 1927         fs = locate_flowset(p->fs.fs_nr); /* locate set */
 1928 
 1929         if (fs == NULL) {
 1930             DUMMYNET_UNLOCK();
 1931             return (ENOENT); /* not found */
 1932         }
 1933 
 1934         /* Unlink from list of flowsets. */
 1935         SLIST_REMOVE( &flowsethash[HASH(fs->fs_nr)], fs, dn_flow_set, next);
 1936 
 1937         if (fs->pipe != NULL) {
 1938             /* Update total weight on parent pipe and cleanup parent heaps. */
 1939             fs->pipe->sum -= fs->weight * fs->backlogged ;
 1940             fs_remove_from_heap(&(fs->pipe->not_eligible_heap), fs);
 1941             fs_remove_from_heap(&(fs->pipe->scheduler_heap), fs);
 1942 #if 1   /* XXX should we remove from the idle_heap as well? */
 1943             fs_remove_from_heap(&(fs->pipe->idle_heap), fs);
 1944 #endif
 1945         }
 1946         purge_flow_set(fs, 1);
 1947         DUMMYNET_UNLOCK();
 1948     }
 1949     return 0 ;
 1950 }
 1951 
 1952 /*
 1953  * Helper function used to copy queue state to userland for IP_DUMMYNET_GET.
 1954  */
 1955 static char *
 1956 dn_copy_set(struct dn_flow_set *set, char *bp)
 1957 {
 1958     int i, copied = 0 ;
 1959     struct dn_flow_queue *q, *qp = (struct dn_flow_queue *)bp;
 1960 
 1961     DUMMYNET_LOCK_ASSERT();
 1962 
 1963     for (i = 0 ; i <= set->rq_size ; i++)
 1964         for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
 1965             if (q->hash_slot != i)
 1966                 printf("dummynet: ++ at %d: wrong slot (have %d, "
 1967                     "should be %d)\n", copied, q->hash_slot, i);
 1968             if (q->fs != set)
 1969                 printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n",
 1970                         i, q->fs, set);
 1971             copied++ ;
 1972             bcopy(q, qp, sizeof( *q ) );
 1973             /* cleanup pointers */
 1974             qp->next = NULL ;
 1975             qp->head = qp->tail = NULL ;
 1976             qp->fs = NULL ;
 1977         }
 1978     if (copied != set->rq_elements)
 1979         printf("dummynet: ++ wrong count, have %d should be %d\n",
 1980             copied, set->rq_elements);
 1981     return (char *)qp ;
 1982 }
 1983 
 1984 static size_t
 1985 dn_calc_size(void)
 1986 {
 1987     struct dn_flow_set *fs;
 1988     struct dn_pipe *pipe;
 1989     size_t size = 0;
 1990     int i;
 1991 
 1992     DUMMYNET_LOCK_ASSERT();
 1993     /*
 1994      * Compute size of data structures: list of pipes and flow_sets.
 1995      */
 1996     for (i = 0; i < HASHSIZE; i++) {
 1997         SLIST_FOREACH(pipe, &pipehash[i], next)
 1998                 size += sizeof(*pipe) +
 1999                     pipe->fs.rq_elements * sizeof(struct dn_flow_queue);
 2000         SLIST_FOREACH(fs, &flowsethash[i], next)
 2001                 size += sizeof (*fs) +
 2002                     fs->rq_elements * sizeof(struct dn_flow_queue);
 2003     }
 2004     return size;
 2005 }
 2006 
 2007 static int
 2008 dummynet_get(struct sockopt *sopt)
 2009 {
 2010     char *buf, *bp ; /* bp is the "copy-pointer" */
 2011     size_t size ;
 2012     struct dn_flow_set *fs;
 2013     struct dn_pipe *pipe;
 2014     int error=0, i ;
 2015 
 2016     /* XXX lock held too long */
 2017     DUMMYNET_LOCK();
 2018     /*
 2019      * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we
 2020      *      cannot use this flag while holding a mutex.
 2021      */
 2022     for (i = 0; i < 10; i++) {
 2023         size = dn_calc_size();
 2024         DUMMYNET_UNLOCK();
 2025         buf = malloc(size, M_TEMP, M_WAITOK);
 2026         DUMMYNET_LOCK();
 2027         if (size == dn_calc_size())
 2028                 break;
 2029         free(buf, M_TEMP);
 2030         buf = NULL;
 2031     }
 2032     if (buf == NULL) {
 2033         DUMMYNET_UNLOCK();
 2034         return ENOBUFS ;
 2035     }
 2036     bp = buf;
 2037     for (i = 0; i < HASHSIZE; i++)
 2038         SLIST_FOREACH(pipe, &pipehash[i], next) {
 2039                 struct dn_pipe *pipe_bp = (struct dn_pipe *)bp;
 2040 
 2041                 /*
 2042                  * Copy the pipe descriptor into *bp, convert delay back to ms,
 2043                  * then copy the flow_set descriptor(s) one at a time.
 2044                  * After each flow_set, copy the queue descriptors it owns.
 2045                  */
 2046                 bcopy(pipe, bp, sizeof(*pipe));
 2047                 pipe_bp->delay = (pipe_bp->delay * 1000) / hz;
 2048                 /*
 2049                  * XXX the following is a hack based on ->next being the
 2050                  * first field in dn_pipe and dn_flow_set. The correct
 2051                  * solution would be to move the dn_flow_set to the beginning
 2052                  * of struct dn_pipe.
 2053                  */
 2054                 pipe_bp->next.sle_next = (struct dn_pipe *)DN_IS_PIPE;
 2055                 /* Clean pointers. */
 2056                 pipe_bp->head = pipe_bp->tail = NULL;
 2057                 pipe_bp->fs.next.sle_next = NULL;
 2058                 pipe_bp->fs.pipe = NULL;
 2059                 pipe_bp->fs.rq = NULL;
 2060 
 2061                 bp += sizeof(*pipe) ;
 2062                 bp = dn_copy_set(&(pipe->fs), bp);
 2063         }
 2064 
 2065     for (i = 0; i < HASHSIZE; i++)
 2066         SLIST_FOREACH(fs, &flowsethash[i], next) {
 2067                 struct dn_flow_set *fs_bp = (struct dn_flow_set *)bp;
 2068 
 2069                 bcopy(fs, bp, sizeof(*fs));
 2070                 /* XXX same hack as above */
 2071                 fs_bp->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE;
 2072                 fs_bp->pipe = NULL;
 2073                 fs_bp->rq = NULL;
 2074                 bp += sizeof(*fs);
 2075                 bp = dn_copy_set(fs, bp);
 2076         }
 2077 
 2078     DUMMYNET_UNLOCK();
 2079 
 2080     error = sooptcopyout(sopt, buf, size);
 2081     free(buf, M_TEMP);
 2082     return error ;
 2083 }
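
/*
 * [Editorial sketch, not part of the original file.] The drop-lock,
 * allocate, relock, revalidate pattern from dummynet_get(), modelled
 * with POSIX threads: a sleeping allocation cannot be done under the
 * mutex, so the size is recomputed after reacquiring it and the
 * attempt is retried a bounded number of times.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static size_t state_size = 128;         /* stand-in for dn_calc_size() */

static size_t
calc_size(void)
{
        return (state_size);
}

static char *
snapshot(size_t *sizep)
{
        char *buf = NULL;
        size_t size = 0;
        int i;

        pthread_mutex_lock(&lock);
        for (i = 0; i < 10; i++) {
                size = calc_size();
                pthread_mutex_unlock(&lock);    /* don't sleep under lock */
                buf = malloc(size);
                pthread_mutex_lock(&lock);
                if (buf != NULL && size == calc_size())
                        break;                  /* size stable: buffer fits */
                free(buf);
                buf = NULL;
        }
        /* ... copy the state into buf here, still under the lock ... */
        pthread_mutex_unlock(&lock);
        *sizep = size;
        return (buf);                           /* NULL if we lost every race */
}

int
main(void)
{
        size_t sz;
        char *b = snapshot(&sz);

        free(b);
        return (0);
}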
 2084 
 2085 /*
 2086  * Handler for the various dummynet socket options (get, flush, config, del)
 2087  */
 2088 static int
 2089 ip_dn_ctl(struct sockopt *sopt)
 2090 {
 2091     int error = 0 ;
 2092     struct dn_pipe *p, tmp_pipe;
 2093 
 2094     /* Disallow sets in really-really secure mode. */
 2095     if (sopt->sopt_dir == SOPT_SET) {
 2096 #if __FreeBSD_version >= 500034
 2097         error =  securelevel_ge(sopt->sopt_td->td_ucred, 3);
 2098         if (error)
 2099             return (error);
 2100 #else
 2101         if (securelevel >= 3)
 2102             return (EPERM);
 2103 #endif
 2104     }
 2105 
 2106     switch (sopt->sopt_name) {
 2107     default :
 2108         printf("dummynet: -- unknown option %d", sopt->sopt_name);
 2109         return EINVAL ;
 2110 
 2111     case IP_DUMMYNET_GET :
 2112         error = dummynet_get(sopt);
 2113         break ;
 2114 
 2115     case IP_DUMMYNET_FLUSH :
 2116         dummynet_flush() ;
 2117         break ;
 2118 
 2119     case IP_DUMMYNET_CONFIGURE :
 2120         p = &tmp_pipe ;
 2121         error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
 2122         if (error)
 2123             break ;
 2124         error = config_pipe(p);
 2125         break ;
 2126 
 2127     case IP_DUMMYNET_DEL :      /* remove a pipe or queue */
 2128         p = &tmp_pipe ;
 2129         error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
 2130         if (error)
 2131             break ;
 2132 
 2133         error = delete_pipe(p);
 2134         break ;
 2135     }
 2136     return error ;
 2137 }
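
/*
 * [Editorial sketch, not part of the original file.] How a userland
 * tool reaches ip_dn_ctl(): ipfw(8) issues these requests on a raw IP
 * socket. On FreeBSD the IP_DUMMYNET_* option constants come from
 * <netinet/in.h>; the fixed 64 KB buffer below is a simplification
 * (the real tool sizes and retries), and root privileges are required.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
        socklen_t len = 64 * 1024;
        char *buf = malloc(len);

        if (s < 0 || buf == NULL) {
                perror("setup");
                return (1);
        }
        if (getsockopt(s, IPPROTO_IP, IP_DUMMYNET_GET, buf, &len) < 0)
                perror("IP_DUMMYNET_GET");
        else
                printf("got %u bytes of pipe/queue state\n", (unsigned)len);
        free(buf);
        close(s);
        return (0);
}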
 2138 
 2139 static void
 2140 ip_dn_init(void)
 2141 {
 2142         int i;
 2143 
 2144         if (bootverbose)
 2145                 printf("DUMMYNET with IPv6 initialized (040826)\n");
 2146 
 2147         DUMMYNET_LOCK_INIT();
 2148 
 2149         for (i = 0; i < HASHSIZE; i++) {
 2150                 SLIST_INIT(&pipehash[i]);
 2151                 SLIST_INIT(&flowsethash[i]);
 2152         }
 2153         ready_heap.size = ready_heap.elements = 0;
 2154         ready_heap.offset = 0;
 2155 
 2156         wfq_ready_heap.size = wfq_ready_heap.elements = 0;
 2157         wfq_ready_heap.offset = 0;
 2158 
 2159         extract_heap.size = extract_heap.elements = 0;
 2160         extract_heap.offset = 0;
 2161 
 2162         ip_dn_ctl_ptr = ip_dn_ctl;
 2163         ip_dn_io_ptr = dummynet_io;
 2164         ip_dn_ruledel_ptr = dn_rule_delete;
 2165 
 2166         TASK_INIT(&dn_task, 0, dummynet_task, NULL);
 2167         dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
 2168             taskqueue_thread_enqueue, &dn_tq);
 2169         taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
 2170 
 2171         callout_init(&dn_timeout, NET_CALLOUT_MPSAFE);
 2172         callout_reset(&dn_timeout, 1, dummynet, NULL);
 2173 
 2174         /* Initialize curr_time adjustment mechanics. */
 2175         getmicrouptime(&prev_t);
 2176 }
 2177 
 2178 #ifdef KLD_MODULE
 2179 static void
 2180 ip_dn_destroy(void)
 2181 {
 2182         ip_dn_ctl_ptr = NULL;
 2183         ip_dn_io_ptr = NULL;
 2184         ip_dn_ruledel_ptr = NULL;
 2185 
 2186         DUMMYNET_LOCK();
 2187         callout_stop(&dn_timeout);
 2188         DUMMYNET_UNLOCK();
 2189         taskqueue_drain(dn_tq, &dn_task);
 2190         taskqueue_free(dn_tq);
 2191 
 2192         dummynet_flush();
 2193 
 2194         DUMMYNET_LOCK_DESTROY();
 2195 }
 2196 #endif /* KLD_MODULE */
 2197 
 2198 static int
 2199 dummynet_modevent(module_t mod, int type, void *data)
 2200 {
 2201         switch (type) {
 2202         case MOD_LOAD:
 2203                 if (DUMMYNET_LOADED) {
 2204                     printf("DUMMYNET already loaded\n");
 2205                     return EEXIST ;
 2206                 }
 2207                 ip_dn_init();
 2208                 break;
 2209 
 2210         case MOD_UNLOAD:
 2211 #if !defined(KLD_MODULE)
 2212                 printf("dummynet statically compiled, cannot unload\n");
 2213                 return EINVAL ;
 2214 #else
 2215                 ip_dn_destroy();
 2216 #endif
 2217                 break ;
 2218         default:
 2219                 return EOPNOTSUPP;
 2220                 break ;
 2221         }
 2222         return 0 ;
 2223 }
 2224 
 2225 static moduledata_t dummynet_mod = {
 2226         "dummynet",
 2227         dummynet_modevent,
 2228         NULL
 2229 };
 2230 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
 2231 MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
 2232 MODULE_VERSION(dummynet, 1);
