FreeBSD/Linux Kernel Cross Reference
sys/altq/altq_hfsc.c


    1 /*      $NetBSD: altq_hfsc.c,v 1.9 2004/02/13 11:36:09 wiz Exp $        */
    2 /*      $KAME: altq_hfsc.c,v 1.9 2001/10/26 04:56:11 kjc Exp $  */
    3 
    4 /*
    5  * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
    6  *
    7  * Permission to use, copy, modify, and distribute this software and
    8  * its documentation is hereby granted (including for commercial or
    9  * for-profit use), provided that both the copyright notice and this
   10  * permission notice appear in all copies of the software, derivative
   11  * works, or modified versions, and any portions thereof, and that
   12  * both notices appear in supporting documentation, and that credit
   13  * is given to Carnegie Mellon University in all publications reporting
   14  * on direct or indirect use of this code or its derivatives.
   15  *
   16  * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
   17  * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
   18  * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
   19  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   21  * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
   24  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
   25  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   26  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   28  * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   29  * DAMAGE.
   30  *
   31  * Carnegie Mellon encourages (but does not require) users of this
   32  * software to return any improvements or extensions that they make,
   33  * and to grant Carnegie Mellon the rights to redistribute these
   34  * changes without encumbrance.
   35  */
   36 /*
   37  * H-FSC is described in Proceedings of SIGCOMM'97,
   38  * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing, 
   39  * Real-Time and Priority Service"
   40  * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
   41  */
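       /*
        * overview of this implementation:
        *  - each class may carry a real-time service curve (cl_rsc) and a
        *    link-sharing (fair) service curve (cl_fsc).
        *  - hfsc_dequeue() first applies the real-time criterion: among the
        *    classes whose eligible time has passed (the per-interface
        *    eligible list), the one with the smallest deadline is served.
        *  - if no class is eligible, the link-sharing criterion is used: the
        *    tree is descended from the root, at each level picking the child
        *    with the smallest virtual time from the parent's active list.
        */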
   42 
   43 #include <sys/cdefs.h>
   44 __KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.9 2004/02/13 11:36:09 wiz Exp $");
   45 
   46 #if defined(__FreeBSD__) || defined(__NetBSD__)
   47 #include "opt_altq.h"
   48 #if (__FreeBSD__ != 2)
   49 #include "opt_inet.h"
   50 #ifdef __FreeBSD__
   51 #include "opt_inet6.h"
   52 #endif
   53 #endif
   54 #endif /* __FreeBSD__ || __NetBSD__ */
   55 
   56 #ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
   57 
   58 #include <sys/param.h>
   59 #include <sys/malloc.h>
   60 #include <sys/mbuf.h>
   61 #include <sys/socket.h>
   62 #include <sys/sockio.h>
   63 #include <sys/systm.h>
   64 #include <sys/proc.h>
   65 #include <sys/errno.h>
   66 #include <sys/kernel.h>
   67 #include <sys/queue.h>
   68 
   69 #include <net/if.h>
   70 #include <net/if_types.h>
   71 
   72 #include <altq/altq.h>
   73 #include <altq/altq_conf.h>
   74 #include <altq/altq_hfsc.h>
   75 
   76 /*
   77  * function prototypes
   78  */
   79 static struct hfsc_if *hfsc_attach __P((struct ifaltq *, u_int));
   80 static int hfsc_detach __P((struct hfsc_if *));
   81 static int hfsc_clear_interface __P((struct hfsc_if *));
   82 static int hfsc_request __P((struct ifaltq *, int, void *));
   83 static void hfsc_purge __P((struct hfsc_if *));
   84 static struct hfsc_class *hfsc_class_create __P((struct hfsc_if *,
   85                  struct service_curve *, struct hfsc_class *, int, int));
   86 static int hfsc_class_destroy __P((struct hfsc_class *));
   87 static int hfsc_class_modify __P((struct hfsc_class *,
   88                           struct service_curve *, struct service_curve *));
   89 static struct hfsc_class *hfsc_nextclass __P((struct hfsc_class *));
   90 
   91 static int hfsc_enqueue __P((struct ifaltq *, struct mbuf *,
   92                              struct altq_pktattr *));
   93 static struct mbuf *hfsc_dequeue __P((struct ifaltq *, int));
   94 
   95 static int hfsc_addq __P((struct hfsc_class *, struct mbuf *));
   96 static struct mbuf *hfsc_getq __P((struct hfsc_class *));
   97 static struct mbuf *hfsc_pollq __P((struct hfsc_class *));
   98 static void hfsc_purgeq __P((struct hfsc_class *));
   99 
  100 static void set_active __P((struct hfsc_class *, int));
  101 static void set_passive __P((struct hfsc_class *));
  102 
  103 static void init_ed __P((struct hfsc_class *, int));
  104 static void update_ed __P((struct hfsc_class *, int));
  105 static void update_d __P((struct hfsc_class *, int));
  106 static void init_v __P((struct hfsc_class *, int));
  107 static void update_v __P((struct hfsc_class *, int));
  108 static ellist_t *ellist_alloc __P((void));
  109 static void ellist_destroy __P((ellist_t *));
  110 static void ellist_insert __P((struct hfsc_class *));
  111 static void ellist_remove __P((struct hfsc_class *));
  112 static void ellist_update __P((struct hfsc_class *));
  113 struct hfsc_class *ellist_get_mindl __P((ellist_t *));
  114 static actlist_t *actlist_alloc __P((void));
  115 static void actlist_destroy __P((actlist_t *));
  116 static void actlist_insert __P((struct hfsc_class *));
  117 static void actlist_remove __P((struct hfsc_class *));
  118 static void actlist_update __P((struct hfsc_class *));
  119 
  120 static __inline u_int64_t seg_x2y __P((u_int64_t, u_int64_t));
  121 static __inline u_int64_t seg_y2x __P((u_int64_t, u_int64_t));
  122 static __inline u_int64_t m2sm __P((u_int));
  123 static __inline u_int64_t m2ism __P((u_int));
  124 static __inline u_int64_t d2dx __P((u_int));
  125 static u_int sm2m __P((u_int64_t));
  126 static u_int dx2d __P((u_int64_t));
  127 
  128 static void sc2isc __P((struct service_curve *, struct internal_sc *));
  129 static void rtsc_init __P((struct runtime_sc *, struct internal_sc *,
  130                            u_int64_t, u_int64_t));
  131 static u_int64_t rtsc_y2x __P((struct runtime_sc *, u_int64_t));
  132 static u_int64_t rtsc_x2y __P((struct runtime_sc *, u_int64_t));
  133 static void rtsc_min __P((struct runtime_sc *, struct internal_sc *,
  134                           u_int64_t, u_int64_t));
  135 
  136 int hfscopen __P((dev_t, int, int, struct proc *));
  137 int hfscclose __P((dev_t, int, int, struct proc *));
  138 int hfscioctl __P((dev_t, ioctlcmd_t, caddr_t, int, struct proc *));
  139 static int hfsccmd_if_attach __P((struct hfsc_attach *));
  140 static int hfsccmd_if_detach __P((struct hfsc_interface *));
  141 static int hfsccmd_add_class __P((struct hfsc_add_class *));
  142 static int hfsccmd_delete_class __P((struct hfsc_delete_class *));
  143 static int hfsccmd_modify_class __P((struct hfsc_modify_class *));
  144 static int hfsccmd_add_filter __P((struct hfsc_add_filter *));
  145 static int hfsccmd_delete_filter __P((struct hfsc_delete_filter *));
  146 static int hfsccmd_class_stats __P((struct hfsc_class_stats *));
  147 static void get_class_stats __P((struct hfsc_basic_class_stats *,
  148     struct hfsc_class *));
  149 static struct hfsc_class *clh_to_clp __P((struct hfsc_if *, u_long));
  150 static u_long clp_to_clh __P((struct hfsc_class *));
  151 
  152 /*
  153  * macros
  154  */
  155 #define is_a_parent_class(cl)   ((cl)->cl_children != NULL)
  156 
  157 /* hif_list keeps all hfsc_if's allocated. */
  158 static struct hfsc_if *hif_list = NULL;
  159 
  160 static struct hfsc_if *
  161 hfsc_attach(ifq, bandwidth)
  162         struct ifaltq *ifq;
  163         u_int bandwidth;
  164 {
  165         struct hfsc_if *hif;
  166         struct service_curve root_sc;
  167 
  168         MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
  169                M_DEVBUF, M_WAITOK);
  170         if (hif == NULL)
  171                 return (NULL);
  172         (void)memset(hif, 0, sizeof(struct hfsc_if));
  173 
  174         hif->hif_eligible = ellist_alloc();
  175         if (hif->hif_eligible == NULL) {
  176                 FREE(hif, M_DEVBUF);
  177                 return NULL;
  178         }
  179 
  180         hif->hif_ifq = ifq;
  181 
  182         /*
  183          * create root class
  184          */
  185         root_sc.m1 = bandwidth;
  186         root_sc.d = 0;
  187         root_sc.m2 = bandwidth;
  188         if ((hif->hif_rootclass =
  189              hfsc_class_create(hif, &root_sc, NULL, 0, 0)) == NULL) {
  190                 FREE(hif, M_DEVBUF);
  191                 return (NULL);
  192         }
  193 
  194         /* add this state to the hfsc list */
  195         hif->hif_next = hif_list;
  196         hif_list = hif;
  197 
  198         return (hif);
  199 }
  200 
  201 static int
  202 hfsc_detach(hif)
  203         struct hfsc_if *hif;
  204 {
  205         (void)hfsc_clear_interface(hif);
  206         (void)hfsc_class_destroy(hif->hif_rootclass);
  207 
  208         /* remove this interface from the hif list */
  209         if (hif_list == hif)
  210                 hif_list = hif->hif_next;
  211         else {
  212                 struct hfsc_if *h;
  213         
  214                 for (h = hif_list; h != NULL; h = h->hif_next)
  215                         if (h->hif_next == hif) {
  216                                 h->hif_next = hif->hif_next;
  217                                 break;
  218                         }
  219                 ASSERT(h != NULL);
  220         }
  221 
  222         ellist_destroy(hif->hif_eligible);
  223 
  224         FREE(hif, M_DEVBUF);
  225 
  226         return (0);
  227 }
  228 
  229 /*
  230  * bring the interface back to the initial state by discarding
  231  * all the filters and classes except the root class.
  232  */
  233 static int
  234 hfsc_clear_interface(hif)
  235         struct hfsc_if *hif;
  236 {
  237         struct hfsc_class       *cl;
  238 
  239         /* free the filters for this interface */
  240         acc_discard_filters(&hif->hif_classifier, NULL, 1);
  241 
  242         /* clear out the classes */
  243         while ((cl = hif->hif_rootclass->cl_children) != NULL) {
  244                 /*
  245                  * remove the first leaf class found in the hierarchy
  246                  * then start over
  247                  */
  248                 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
  249                         if (!is_a_parent_class(cl)) {
  250                                 (void)hfsc_class_destroy(cl);
  251                                 break;
  252                         }
  253                 }
  254         }
  255         
  256         return (0);
  257 }
  258 
  259 static int
  260 hfsc_request(ifq, req, arg)
  261         struct ifaltq *ifq;
  262         int req;
  263         void *arg;
  264 {
  265         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  266 
  267         switch (req) {
  268         case ALTRQ_PURGE:
  269                 hfsc_purge(hif);
  270                 break;
  271         }
  272         return (0);
  273 }
  274 
  275 /* discard all the queued packets on the interface */
  276 static void
  277 hfsc_purge(hif)
  278         struct hfsc_if *hif;
  279 {
  280         struct hfsc_class *cl;
  281 
  282         for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
  283                 if (!qempty(cl->cl_q))
  284                         hfsc_purgeq(cl);
  285         if (ALTQ_IS_ENABLED(hif->hif_ifq))
  286                 hif->hif_ifq->ifq_len = 0;
  287 }
  288 
  289 struct hfsc_class *
  290 hfsc_class_create(hif, sc, parent, qlimit, flags)
  291         struct hfsc_if *hif;
  292         struct service_curve *sc;
  293         struct hfsc_class *parent;
  294         int qlimit, flags;
  295 {
  296         struct hfsc_class *cl, *p;
  297         int s;
  298 
  299 #ifndef ALTQ_RED
  300         if (flags & HFCF_RED) {
  301                 printf("hfsc_class_create: RED not configured for HFSC!\n");
  302                 return (NULL);
  303         }
  304 #endif
  305 
  306         MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
  307                M_DEVBUF, M_WAITOK);
  308         if (cl == NULL)
  309                 return (NULL);
  310         (void)memset(cl, 0, sizeof(struct hfsc_class));
  311 
  312         MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
  313                M_DEVBUF, M_WAITOK);
  314         if (cl->cl_q == NULL)
  315                 goto err_ret;
  316         (void)memset(cl->cl_q, 0, sizeof(class_queue_t));
  317 
  318         cl->cl_actc = actlist_alloc();
  319         if (cl->cl_actc == NULL)
  320                 goto err_ret;
  321 
  322         if (qlimit == 0)
  323                 qlimit = 50;  /* use default */
  324         qlimit(cl->cl_q) = qlimit;
  325         qtype(cl->cl_q) = Q_DROPTAIL;
  326         qlen(cl->cl_q) = 0;
  327         cl->cl_flags = flags;
  328 #ifdef ALTQ_RED
  329         if (flags & (HFCF_RED|HFCF_RIO)) {
  330                 int red_flags, red_pkttime;
  331 
  332                 red_flags = 0;
  333                 if (flags & HFCF_ECN)
  334                         red_flags |= REDF_ECN;
  335 #ifdef ALTQ_RIO
  336                 if (flags & HFCF_CLEARDSCP)
  337                         red_flags |= RIOF_CLEARDSCP;
  338 #endif
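                       /*
                        * red_pkttime approximates the time needed to transmit
                        * one MTU-sized packet at rate m2 (bits/sec), in nsec.
                        */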
  339                 if (sc->m2 < 8)
  340                         red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
  341                 else
  342                         red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
  343                                 * 1000 * 1000 * 1000 / (sc->m2 / 8);
  344                 if (flags & HFCF_RED) {
  345                         cl->cl_red = red_alloc(0, 0, 0, 0,
  346                                                red_flags, red_pkttime);
  347                         if (cl->cl_red != NULL)
  348                                 qtype(cl->cl_q) = Q_RED;
  349                 }
  350 #ifdef ALTQ_RIO
  351                 else {
  352                         cl->cl_red = (red_t *)rio_alloc(0, NULL,
  353                                                       red_flags, red_pkttime);
  354                         if (cl->cl_red != NULL)
  355                                 qtype(cl->cl_q) = Q_RIO;
  356                 }
  357 #endif
  358         }
  359 #endif /* ALTQ_RED */
  360 
  361         if (sc != NULL && (sc->m1 != 0 || sc->m2 != 0)) {
  362                 MALLOC(cl->cl_rsc, struct internal_sc *,
  363                        sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  364                 if (cl->cl_rsc == NULL)
  365                         goto err_ret;
  366                 (void)memset(cl->cl_rsc, 0, sizeof(struct internal_sc));
  367                 sc2isc(sc, cl->cl_rsc);
  368                 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
  369                 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
  370 
  371                 MALLOC(cl->cl_fsc, struct internal_sc *,
  372                        sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  373                 if (cl->cl_fsc == NULL)
  374                         goto err_ret;
  375                 (void)memset(cl->cl_fsc, 0, sizeof(struct internal_sc));
  376                 sc2isc(sc, cl->cl_fsc);
  377                 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
  378         }
  379 
  380         cl->cl_id = hif->hif_classid++;
  381         cl->cl_handle = (u_long)cl;  /* XXX: just a pointer to this class */
  382         cl->cl_hif = hif;
  383         cl->cl_parent = parent;
  384 
  385         s = splnet();
  386         hif->hif_classes++;
  387         if (flags & HFCF_DEFAULTCLASS)
  388                 hif->hif_defaultclass = cl;
  389 
  390         /* add this class to the children list of the parent */
  391         if (parent == NULL) {
  392                 /* this is root class */
  393         }
  394         else if ((p = parent->cl_children) == NULL)
  395                 parent->cl_children = cl;
  396         else {
  397                 while (p->cl_siblings != NULL)
  398                         p = p->cl_siblings;
  399                 p->cl_siblings = cl;
  400         }
  401         splx(s);
  402 
  403         return (cl);
  404 
  405  err_ret:
  406         if (cl->cl_actc != NULL)
  407                 actlist_destroy(cl->cl_actc);
  408         if (cl->cl_red != NULL) {
  409 #ifdef ALTQ_RIO
  410                 if (q_is_rio(cl->cl_q))
  411                         rio_destroy((rio_t *)cl->cl_red);
  412 #endif
  413 #ifdef ALTQ_RED
  414                 if (q_is_red(cl->cl_q))
  415                         red_destroy(cl->cl_red);
  416 #endif
  417         }
  418         if (cl->cl_fsc != NULL)
  419                 FREE(cl->cl_fsc, M_DEVBUF);
  420         if (cl->cl_rsc != NULL)
  421                 FREE(cl->cl_rsc, M_DEVBUF);
  422         if (cl->cl_q != NULL)
  423                 FREE(cl->cl_q, M_DEVBUF);
  424         FREE(cl, M_DEVBUF);
  425         return (NULL);
  426 }
  427 
  428 static int
  429 hfsc_class_destroy(cl)
  430         struct hfsc_class *cl;
  431 {
  432         int s;
  433 
  434         if (is_a_parent_class(cl))
  435                 return (EBUSY);
  436 
  437         s = splnet();
  438 
  439         /* delete filters referencing to this class */
  440         acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
  441 
  442         if (!qempty(cl->cl_q))
  443                 hfsc_purgeq(cl);
  444 
  445         if (cl->cl_parent == NULL) {
  446                 /* this is root class */
  447         } else {
  448                 struct hfsc_class *p = cl->cl_parent->cl_children;
  449 
  450                 if (p == cl)
  451                         cl->cl_parent->cl_children = cl->cl_siblings;
  452                 else do {
  453                         if (p->cl_siblings == cl) {
  454                                 p->cl_siblings = cl->cl_siblings;
  455                                 break;
  456                         }
  457                 } while ((p = p->cl_siblings) != NULL);
  458                 ASSERT(p != NULL);
  459         }
  460         cl->cl_hif->hif_classes--;
  461         splx(s);
  462 
  463         actlist_destroy(cl->cl_actc);
  464 
  465         if (cl->cl_red != NULL) {
  466 #ifdef ALTQ_RIO
  467                 if (q_is_rio(cl->cl_q))
  468                         rio_destroy((rio_t *)cl->cl_red);
  469 #endif
  470 #ifdef ALTQ_RED
  471                 if (q_is_red(cl->cl_q))
  472                         red_destroy(cl->cl_red);
  473 #endif
  474         }
  475         if (cl->cl_fsc != NULL)
  476                 FREE(cl->cl_fsc, M_DEVBUF);
  477         if (cl->cl_rsc != NULL)
  478                 FREE(cl->cl_rsc, M_DEVBUF);
  479         FREE(cl->cl_q, M_DEVBUF);
  480         FREE(cl, M_DEVBUF);
  481 
  482         return (0);
  483 }
  484 
  485 static int
  486 hfsc_class_modify(cl, rsc, fsc)
  487         struct hfsc_class *cl;
  488         struct service_curve *rsc, *fsc;
  489 {
  490         struct internal_sc *rsc_tmp, *fsc_tmp;
  491         int s;
  492 
  493         if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
  494             cl->cl_rsc == NULL) {
  495                 MALLOC(rsc_tmp, struct internal_sc *,
  496                        sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  497                 if (rsc_tmp == NULL)
  498                         return (ENOMEM);
  499                 (void)memset(rsc_tmp, 0, sizeof(struct internal_sc));
  500         } else
  501                 rsc_tmp = NULL;
  502         if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
  503             cl->cl_fsc == NULL) {
  504                 MALLOC(fsc_tmp, struct internal_sc *,
  505                        sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  506                 if (fsc_tmp == NULL)
  507                         return (ENOMEM);
  508                 (void)memset(fsc_tmp, 0, sizeof(struct internal_sc));
  509         } else
  510                 fsc_tmp = NULL;
  511 
  512         s = splnet();
  513         if (!qempty(cl->cl_q))
  514                 hfsc_purgeq(cl);
  515 
  516         if (rsc != NULL) {
  517                 if (rsc->m1 == 0 && rsc->m2 == 0) {
  518                         if (cl->cl_rsc != NULL) {
  519                                 FREE(cl->cl_rsc, M_DEVBUF);
  520                                 cl->cl_rsc = NULL;
  521                         }
  522                 } else {
  523                         if (cl->cl_rsc == NULL)
  524                                 cl->cl_rsc = rsc_tmp;
  525                         sc2isc(rsc, cl->cl_rsc);
  526                         rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
  527                         rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
  528                 }
  529         }
  530 
  531         if (fsc != NULL) {
  532                 if (fsc->m1 == 0 && fsc->m2 == 0) {
  533                         if (cl->cl_fsc != NULL) {
  534                                 FREE(cl->cl_fsc, M_DEVBUF);
  535                                 cl->cl_fsc = NULL;
  536                         }
  537                 } else {
  538                         if (cl->cl_fsc == NULL)
  539                                 cl->cl_fsc = fsc_tmp;
  540                         sc2isc(fsc, cl->cl_fsc);
  541                         rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
  542                 }
  543         }
  544         splx(s);
  545 
  546         return (0);
  547 }
  548 
  549 /*
  550  * hfsc_nextclass returns the next class in the tree.
  551  *   usage:
  552  *      for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
  553  *              do_something;
  554  */
  555 static struct hfsc_class *
  556 hfsc_nextclass(cl)
  557         struct hfsc_class *cl;
  558 {
  559         if (cl->cl_children != NULL)
  560                 cl = cl->cl_children;
  561         else if (cl->cl_siblings != NULL)
  562                 cl = cl->cl_siblings;
  563         else {
  564                 while ((cl = cl->cl_parent) != NULL)
  565                         if (cl->cl_siblings) {
  566                                 cl = cl->cl_siblings;
  567                                 break;
  568                         }
  569         }
  570 
  571         return (cl);
  572 }
  573 
  574 /*
  575  * hfsc_enqueue is an enqueue function to be registered to
  576  * (*altq_enqueue) in struct ifaltq.
  577  */
  578 static int 
  579 hfsc_enqueue(ifq, m, pktattr)
  580         struct ifaltq *ifq;
  581         struct mbuf *m;
  582         struct altq_pktattr *pktattr;
  583 {
  584         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  585         struct hfsc_class *cl;
  586         int len;
  587 
  588         /* grab class set by classifier */
  589         if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
  590                 cl = hif->hif_defaultclass;
  591         cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
  592 
  593         len = m_pktlen(m);
  594         if (hfsc_addq(cl, m) != 0) {
  595                 /* drop occurred.  mbuf was freed in hfsc_addq. */
  596                 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
  597                 return (ENOBUFS);
  598         }
  599         IFQ_INC_LEN(ifq);
  600         cl->cl_hif->hif_packets++;
  601 
  602         /* successfully queued. */
  603         if (qlen(cl->cl_q) == 1)
  604                 set_active(cl, m_pktlen(m));
  605 
  606 #ifdef HFSC_PKTLOG
  607         /* put the logging_hook here */
  608 #endif
  609         return (0);
  610 }
  611 
  612 /*
  613  * hfsc_dequeue is a dequeue function to be registered to
  614  * (*altq_dequeue) in struct ifaltq.
  615  *
  616  * note: ALTDQ_POLL returns the next packet without removing the packet
  617  *      from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
  618  *      ALTDQ_REMOVE must return the same packet if called immediately
  619  *      after ALTDQ_POLL.
  620  */
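       /*
        * implementation note: the class picked by ALTDQ_POLL is cached in
        * hif->hif_pollcache so that the next ALTDQ_REMOVE dequeues from the
        * same class, satisfying the requirement above.
        */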
  621 static struct mbuf *
  622 hfsc_dequeue(ifq, op)
  623         struct ifaltq   *ifq;
  624         int             op;
  625 {
  626         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  627         struct hfsc_class *cl;
  628         struct mbuf *m;
  629         int len, next_len;
  630         int realtime = 0;
  631 
  632         if (hif->hif_packets == 0)
  633                 /* no packet in the tree */
  634                 return (NULL);
  635 
  636         if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
  637                 u_int64_t cur_time;
  638                 
  639                 cl = hif->hif_pollcache;
  640                 hif->hif_pollcache = NULL;
  641                 /* check if the class was scheduled by real-time criteria */
  642                 if (cl->cl_rsc != NULL) {
  643                         cur_time = read_machclk();
  644                         realtime = (cl->cl_e <= cur_time);
  645                 }
  646         } else {
  647                 /*
  648                  * if there are eligible classes, use real-time criteria.
  649                  * find the class with the minimum deadline among
  650                  * the eligible classes.
  651                  */
  652                 if ((cl = ellist_get_mindl(hif->hif_eligible)) != NULL) {
  653                         realtime = 1;
  654                 } else {
  655                         /*
  656                          * use link-sharing criteria
  657                          * get the class with the minimum vt in the hierarchy
  658                          */
  659                         cl = hif->hif_rootclass;
  660                         while (is_a_parent_class(cl)) {
  661                                 cl = actlist_first(cl->cl_actc);
  662                                 if (cl == NULL)
  663                                         return (NULL);
  664                         }
  665                 }
  666 
  667                 if (op == ALTDQ_POLL) {
  668                         hif->hif_pollcache = cl;
  669                         m = hfsc_pollq(cl);
  670                         return (m);
  671                 }
  672         }
  673 
  674         m = hfsc_getq(cl);
  675         len = m_pktlen(m);
  676         cl->cl_hif->hif_packets--;
  677         IFQ_DEC_LEN(ifq);
  678         PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
  679 
  680         update_v(cl, len);
  681         if (realtime)
  682                 cl->cl_cumul += len;
  683 
  684         if (!qempty(cl->cl_q)) {
  685                 if (cl->cl_rsc != NULL) {
  686                         /* update ed */
  687                         next_len = m_pktlen(qhead(cl->cl_q));
  688                 
  689                         if (realtime)
  690                                 update_ed(cl, next_len);
  691                         else
  692                                 update_d(cl, next_len);
  693                 }
  694         } else {
  695                 /* the class becomes passive */
  696                 set_passive(cl);
  697         }
  698 
  699 #ifdef HFSC_PKTLOG
  700         /* put the logging_hook here */
  701 #endif
  702 
  703         return (m);
  704 }
  705 
  706 static int
  707 hfsc_addq(cl, m)
  708         struct hfsc_class *cl;
  709         struct mbuf *m;
  710 {
  711 
  712 #ifdef ALTQ_RIO
  713         if (q_is_rio(cl->cl_q))
  714                 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
  715                                 m, cl->cl_pktattr);
  716 #endif
  717 #ifdef ALTQ_RED
  718         if (q_is_red(cl->cl_q))
  719                 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
  720 #endif
  721         if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
  722                 m_freem(m);
  723                 return (-1);
  724         }
  725 
  726         if (cl->cl_flags & HFCF_CLEARDSCP)
  727                 write_dsfield(m, cl->cl_pktattr, 0);
  728 
  729         _addq(cl->cl_q, m);
  730 
  731         return (0);
  732 }
  733 
  734 static struct mbuf *
  735 hfsc_getq(cl)
  736         struct hfsc_class *cl;
  737 {
  738 #ifdef ALTQ_RIO
  739         if (q_is_rio(cl->cl_q))
  740                 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
  741 #endif
  742 #ifdef ALTQ_RED
  743         if (q_is_red(cl->cl_q))
  744                 return red_getq(cl->cl_red, cl->cl_q);
  745 #endif
  746         return _getq(cl->cl_q);
  747 }
  748 
  749 static struct mbuf *
  750 hfsc_pollq(cl)
  751         struct hfsc_class *cl;
  752 {
  753         return qhead(cl->cl_q);
  754 }
  755 
  756 static void
  757 hfsc_purgeq(cl)
  758         struct hfsc_class *cl;
  759 {
  760         struct mbuf *m;
  761 
  762         if (qempty(cl->cl_q))
  763                 return;
  764 
  765         while ((m = _getq(cl->cl_q)) != NULL) {
  766                 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
  767                 m_freem(m);
  768         }
  769         ASSERT(qlen(cl->cl_q) == 0);
  770         
  771         set_passive(cl);
  772 }
  773 
  774 static void 
  775 set_active(cl, len)
  776         struct hfsc_class *cl;
  777         int len;
  778 {
  779         if (cl->cl_rsc != NULL)
  780                 init_ed(cl, len);
  781         if (cl->cl_fsc != NULL)
  782                 init_v(cl, len);
  783 
  784         cl->cl_stats.period++;
  785 }
  786 
  787 static void 
  788 set_passive(cl)
  789         struct hfsc_class *cl;
  790 {
  791         if (cl->cl_rsc != NULL)
  792                 ellist_remove(cl);
  793 
  794         if (cl->cl_fsc != NULL) {
  795                 while (cl->cl_parent != NULL) {
  796                         if (--cl->cl_nactive == 0) {
  797                                 /* remove this class from the vt list */
  798                                 actlist_remove(cl);
  799                         } else
  800                                 /* still has active children */
  801                                 break;
  802 
  803                         /* go up to the parent class */
  804                         cl = cl->cl_parent;
  805                 }
  806         }
  807 }
  808 
  809 static void 
  810 init_ed(cl, next_len)
  811         struct hfsc_class *cl;
  812         int next_len;
  813 {
  814         u_int64_t cur_time;
  815 
  816         cur_time = read_machclk();
  817 
  818         /* update the deadline curve */
  819         rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
  820 
  821         /*
  822          * update the eligible curve.
  823          * for concave, it is equal to the deadline curve.
  824          * for convex, it is a linear curve with slope m2.
  825          */
  826         cl->cl_eligible = cl->cl_deadline;
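               /*
                * sm1 <= sm2 means the curve is convex: drop the first
                * segment so that the eligible curve is the single line
                * with slope m2.
                */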
  827         if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
  828                 cl->cl_eligible.dx = 0;
  829                 cl->cl_eligible.dy = 0;
  830         }
  831 
  832         /* compute e and d */
  833         cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
  834         cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
  835 
  836         ellist_insert(cl);
  837 }
  838 
  839 static void 
  840 update_ed(cl, next_len)
  841         struct hfsc_class *cl;
  842         int next_len;
  843 {
  844         cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
  845         cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
  846 
  847         ellist_update(cl);
  848 }
  849 
  850 static void 
  851 update_d(cl, next_len)
  852         struct hfsc_class *cl;
  853         int next_len;
  854 {
  855         cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
  856 }
  857 
  858 static void 
  859 init_v(cl, len)
  860         struct hfsc_class *cl;
  861         int len;
  862 {
  863         struct hfsc_class *min_cl, *max_cl;
  864 
  865         while (cl->cl_parent != NULL) {
  866                 
  867                 if (cl->cl_nactive++ > 0)
  868                         /* already active */
  869                         break;
  870 
  871                 /*
   872                  * if the parent became idle while this class was idle,
   873                  * reset vt and the runtime service curve.
  874                  */
  875                 if (cl->cl_parent->cl_nactive == 0 ||
  876                     cl->cl_parent->cl_vtperiod != cl->cl_parentperiod) {
  877                         cl->cl_vt = 0;
  878                         rtsc_init(&cl->cl_virtual, cl->cl_fsc,
  879                                   0, cl->cl_total);
  880                 }
  881                 min_cl = actlist_first(cl->cl_parent->cl_actc);
  882                 if (min_cl != NULL) {
  883                         u_int64_t vt;
  884 
  885                         /*
  886                          * set vt to the average of the min and max classes.
  887                          * if the parent's period didn't change,
  888                          * don't decrease vt of the class.
  889                          */
  890                         max_cl = actlist_last(cl->cl_parent->cl_actc);
  891                         vt = (min_cl->cl_vt + max_cl->cl_vt) / 2;
  892                         if (cl->cl_parent->cl_vtperiod != cl->cl_parentperiod
  893                             || vt > cl->cl_vt)
  894                                 cl->cl_vt = vt;
  895                 }
  896 
  897                 /* update the virtual curve */
  898                 rtsc_min(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt, cl->cl_total);
  899 
  900                 cl->cl_vtperiod++;  /* increment vt period */
  901                 cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
  902                 if (cl->cl_parent->cl_nactive == 0)
  903                         cl->cl_parentperiod++;
  904 
  905                 actlist_insert(cl);
  906 
  907                 /* go up to the parent class */
  908                 cl = cl->cl_parent;
  909         }
  910 }
  911 
  912 static void 
  913 update_v(cl, len)
  914         struct hfsc_class *cl;
  915         int len;
  916 {
  917         while (cl->cl_parent != NULL) {
  918 
  919                 cl->cl_total += len;
  920 
  921                 if (cl->cl_fsc != NULL) {
  922                         cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total);
  923 
  924                         /* update the vt list */
  925                         actlist_update(cl);
  926                 }
  927 
  928                 /* go up to the parent class */
  929                 cl = cl->cl_parent;
  930         }
  931 }
  932 
  933 /*
  934  * TAILQ based ellist and actlist implementation
  935  * (ion wanted to make a calendar queue based implementation)
  936  */
  937 /*
   938  * eligible list holds backlogged classes sorted by their eligible times.
  939  * there is one eligible list per interface.
  940  */
  941 
  942 static ellist_t *
  943 ellist_alloc()
  944 {
  945         ellist_t *head;
  946         
  947         MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
  948         TAILQ_INIT(head);
  949         return (head);
  950 }
  951 
  952 static void
  953 ellist_destroy(head)
  954         ellist_t *head;
  955 {
  956         FREE(head, M_DEVBUF);
  957 }
  958 
  959 static void 
  960 ellist_insert(cl)
  961         struct hfsc_class *cl;
  962 {
  963         struct hfsc_if  *hif = cl->cl_hif;
  964         struct hfsc_class *p;
  965 
  966         /* check the last entry first */
  967         if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
  968             p->cl_e <= cl->cl_e) {
  969                 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
  970                 return;
  971         }
  972 
  973         TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
  974                 if (cl->cl_e < p->cl_e) {
  975                         TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
  976                         return;
  977                 }
  978         }
  979         ASSERT(0); /* should not reach here */
  980 }
  981 
  982 static void 
  983 ellist_remove(cl)
  984         struct hfsc_class *cl;
  985 {
  986         struct hfsc_if  *hif = cl->cl_hif;
  987         
  988         TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
  989 }
  990 
  991 static void 
  992 ellist_update(cl)
  993         struct hfsc_class *cl;
  994 {
  995         struct hfsc_if  *hif = cl->cl_hif;
  996         struct hfsc_class *p, *last;
  997 
  998         /*
  999          * the eligible time of a class increases monotonically.
 1000          * if the next entry has a larger eligible time, nothing to do.
 1001          */
 1002         p = TAILQ_NEXT(cl, cl_ellist);
 1003         if (p == NULL || cl->cl_e <= p->cl_e)
 1004                 return;
 1005 
 1006         /* check the last entry */
 1007         last = TAILQ_LAST(hif->hif_eligible, _eligible);
 1008         ASSERT(last != NULL);
 1009         if (last->cl_e <= cl->cl_e) {
 1010                 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
 1011                 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
 1012                 return;
 1013         }
 1014 
 1015         /*
 1016          * the new position must be between the next entry
 1017          * and the last entry
 1018          */
 1019         while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
 1020                 if (cl->cl_e < p->cl_e) {
 1021                         TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
 1022                         TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
 1023                         return;
 1024                 }
 1025         }
 1026         ASSERT(0); /* should not reach here */
 1027 }
 1028 
 1029 /* find the class with the minimum deadline among the eligible classes */
 1030 struct hfsc_class *
 1031 ellist_get_mindl(head)
 1032         ellist_t *head;
 1033 {
 1034         struct hfsc_class *p, *cl = NULL;
 1035         u_int64_t cur_time;
 1036 
 1037         cur_time = read_machclk();
 1038 
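               /*
                * the list is sorted by eligible time; scan only the classes
                * that are already eligible and keep the minimum deadline.
                */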
 1039         TAILQ_FOREACH(p, head, cl_ellist) {
 1040                 if (p->cl_e > cur_time)
 1041                         break;
 1042                 if (cl == NULL || p->cl_d < cl->cl_d)
 1043                         cl = p;
 1044         }
 1045         return (cl);
 1046 }
 1047 
 1048 /*
  1049  * active children list holds backlogged child classes sorted
  1050  * by their virtual time.
 1051  * each intermediate class has one active children list.
 1052  */
 1053 static actlist_t *
 1054 actlist_alloc()
 1055 {
 1056         actlist_t *head;
 1057         
 1058         MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
 1059         TAILQ_INIT(head);
 1060         return (head);
 1061 }
 1062 
 1063 static void
 1064 actlist_destroy(head)
 1065         actlist_t *head;
 1066 {
 1067         FREE(head, M_DEVBUF);
 1068 }
 1069 static void 
 1070 actlist_insert(cl)
 1071         struct hfsc_class *cl;
 1072 {
 1073         struct hfsc_class *p;
 1074 
 1075         /* check the last entry first */
 1076         if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
 1077             || p->cl_vt <= cl->cl_vt) {
 1078                 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
 1079                 return;
 1080         }
 1081 
 1082         TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
 1083                 if (cl->cl_vt < p->cl_vt) {
 1084                         TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
 1085                         return;
 1086                 }
 1087         }
 1088         ASSERT(0); /* should not reach here */
 1089 }
 1090 
 1091 static void 
 1092 actlist_remove(cl)
 1093         struct hfsc_class *cl;
 1094 {
 1095         TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
 1096 }
 1097 
 1098 static void
 1099 actlist_update(cl)
 1100         struct hfsc_class *cl;
 1101 {
 1102         struct hfsc_class *p, *last;
 1103 
 1104         /*
 1105          * the virtual time of a class increases monotonically during its
 1106          * backlogged period.
 1107          * if the next entry has a larger virtual time, nothing to do.
 1108          */
 1109         p = TAILQ_NEXT(cl, cl_actlist);
 1110         if (p == NULL || cl->cl_vt <= p->cl_vt)
 1111                 return;
 1112 
 1113         /* check the last entry */
 1114         last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
 1115         ASSERT(last != NULL);
 1116         if (last->cl_vt <= cl->cl_vt) {
 1117                 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
 1118                 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
 1119                 return;
 1120         }
 1121 
 1122         /*
 1123          * the new position must be between the next entry
 1124          * and the last entry
 1125          */
 1126         while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
 1127                 if (cl->cl_vt < p->cl_vt) {
 1128                         TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
 1129                         TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
 1130                         return;
 1131                 }
 1132         }
 1133         ASSERT(0); /* should not reach here */
 1134 }
 1135 
 1136 /*
 1137  * service curve support functions
 1138  *
 1139  *  external service curve parameters
 1140  *      m: bits/sec
 1141  *      d: msec
 1142  *  internal service curve parameters
 1143  *      sm: (bytes/tsc_interval) << SM_SHIFT
 1144  *      ism: (tsc_count/byte) << ISM_SHIFT
 1145  *      dx: tsc_count
 1146  *
 1147  * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
  1148  * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 1149  * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 1150  * digits in decimal using the following table.
 1151  *
  1152  *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 1153  *  ----------+-------------------------------------------------------
 1154  *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 1155  *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 1156  *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 1157  * 
 1158  *  nsec/byte   80000      8000       800        80         8
 1159  *  ism(500MHz) 40000      4000       400        40         4
 1160  *  ism(200MHz) 16000      1600       160        16         1.6
 1161  */
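       /*
        * example (assuming machclk_freq = 1GHz, i.e. one tick per nsec):
        *   m = 100Mbps:  sm  = m2sm(m)  = (m << SM_SHIFT) / 8 / machclk_freq
        *                               ~= 209715  (0.0125 bytes/tick << 24)
        *                 ism = m2ism(m) = (machclk_freq << ISM_SHIFT) * 8 / m
        *                                = 81920    (80 ticks/byte << 10)
        *   d = 10msec:   dx  = d2dx(d)  = d * machclk_freq / 1000
        *                                = 10,000,000 ticks
        */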
 1162 #define SM_SHIFT        24
 1163 #define ISM_SHIFT       10
 1164 
 1165 #define SC_LARGEVAL     (1LL << 32)
 1166 #define SC_INFINITY     0xffffffffffffffffLL
 1167 
 1168 static __inline u_int64_t 
 1169 seg_x2y(x, sm)
 1170         u_int64_t x;
 1171         u_int64_t sm;
 1172 {
 1173         u_int64_t y;
 1174 
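               /*
                * for large x, shift before the multiply to avoid 64-bit
                * overflow, trading away the low-order bits of precision.
                */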
 1175         if (x < SC_LARGEVAL)
 1176                 y = x * sm >> SM_SHIFT;
 1177         else
 1178                 y = (x >> SM_SHIFT) * sm;
 1179         return (y);
 1180 }
 1181 
 1182 static __inline u_int64_t 
 1183 seg_y2x(y, ism)
 1184         u_int64_t y;
 1185         u_int64_t ism;
 1186 {
 1187         u_int64_t x;
 1188 
 1189         if (y == 0)
 1190                 x = 0;
 1191         else if (ism == SC_INFINITY)
 1192                 x = SC_INFINITY;
 1193         else if (y < SC_LARGEVAL)
 1194                 x = y * ism >> ISM_SHIFT;
 1195         else
 1196                 x = (y >> ISM_SHIFT) * ism;
 1197         return (x);
 1198 }
 1199 
 1200 static __inline u_int64_t 
 1201 m2sm(m)
 1202         u_int m;
 1203 {
 1204         u_int64_t sm;
 1205 
 1206         sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
 1207         return (sm);
 1208 }
 1209 
 1210 static __inline u_int64_t 
 1211 m2ism(m)
 1212         u_int m;
 1213 {
 1214         u_int64_t ism;
 1215 
 1216         if (m == 0)
 1217                 ism = SC_INFINITY;
 1218         else
 1219                 ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
 1220         return (ism);
 1221 }
 1222 
 1223 static __inline u_int64_t 
 1224 d2dx(d)
 1225         u_int   d;
 1226 {
 1227         u_int64_t dx;
 1228         
 1229         dx = ((u_int64_t)d * machclk_freq) / 1000;
 1230         return (dx);
 1231 }
 1232 
 1233 static u_int 
 1234 sm2m(sm)
 1235         u_int64_t sm;
 1236 {
 1237         u_int64_t m;
 1238 
 1239         m = (sm * 8 * machclk_freq) >> SM_SHIFT;
 1240         return ((u_int)m);
 1241 }
 1242 
 1243 static u_int 
 1244 dx2d(dx)
 1245         u_int64_t dx;
 1246 {
 1247         u_int64_t d;
 1248 
 1249         d = dx * 1000 / machclk_freq;
 1250         return ((u_int)d);
 1251 }
 1252 
 1253 static void 
 1254 sc2isc(sc, isc)
 1255         struct service_curve    *sc;
 1256         struct internal_sc      *isc;
 1257 {
 1258         isc->sm1 = m2sm(sc->m1);
 1259         isc->ism1 = m2ism(sc->m1);
 1260         isc->dx = d2dx(sc->d);
 1261         isc->dy = seg_x2y(isc->dx, isc->sm1);
 1262         isc->sm2 = m2sm(sc->m2);
 1263         isc->ism2 = m2ism(sc->m2);
 1264 }
 1265 
 1266 /*
 1267  * initialize the runtime service curve with the given internal
 1268  * service curve starting at (x, y).
 1269  */
 1270 static void 
 1271 rtsc_init(rtsc, isc, x, y)
 1272         struct runtime_sc       *rtsc;
 1273         struct internal_sc      *isc;
 1274         u_int64_t               x, y;
 1275 {
 1276         rtsc->x =       x;
 1277         rtsc->y =       y;
 1278         rtsc->sm1 =     isc->sm1;
 1279         rtsc->ism1 =    isc->ism1;
 1280         rtsc->dx =      isc->dx;
 1281         rtsc->dy =      isc->dy;
 1282         rtsc->sm2 =     isc->sm2;
 1283         rtsc->ism2 =    isc->ism2;
 1284 }
 1285 
 1286 /*
 1287  * calculate the y-projection of the runtime service curve by the
 1288  * given x-projection value
 1289  */
 1290 static u_int64_t 
 1291 rtsc_y2x(rtsc, y)
 1292         struct runtime_sc       *rtsc;
 1293         u_int64_t               y;
 1294 {
 1295         u_int64_t       x;
 1296 
 1297         if (y < rtsc->y)
 1298                 x = rtsc->x;
 1299         else if (y <= rtsc->y + rtsc->dy) {
 1300                 /* x belongs to the 1st segment */
 1301                 if (rtsc->dy == 0)
 1302                         x = rtsc->x + rtsc->dx;
 1303                 else
 1304                         x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
 1305         } else {
 1306                 /* x belongs to the 2nd segment */
 1307                 x = rtsc->x + rtsc->dx
 1308                     + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
 1309         }
 1310         return (x);
 1311 }
 1312 
 1313 static u_int64_t 
 1314 rtsc_x2y(rtsc, x)
 1315         struct runtime_sc       *rtsc;
 1316         u_int64_t               x;
 1317 {
 1318         u_int64_t       y;
 1319 
 1320         if (x <= rtsc->x)
 1321                 y = rtsc->y;
 1322         else if (x <= rtsc->x + rtsc->dx)
 1323                 /* y belongs to the 1st segment */
 1324                 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
 1325         else
 1326                 /* y belongs to the 2nd segment */
 1327                 y = rtsc->y + rtsc->dy
 1328                     + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
 1329         return (y);
 1330 }
 1331 
 1332 /*
 1333  * update the runtime service curve by taking the minimum of the current
 1334  * runtime service curve and the service curve starting at (x, y).
 1335  */
 1336 static void 
 1337 rtsc_min(rtsc, isc, x, y)
 1338         struct runtime_sc       *rtsc;
 1339         struct internal_sc      *isc;
 1340         u_int64_t               x, y;
 1341 {
 1342         u_int64_t       y1, y2, dx, dy;
 1343 
 1344         if (isc->sm1 <= isc->sm2) {
 1345                 /* service curve is convex */
 1346                 y1 = rtsc_x2y(rtsc, x);
 1347                 if (y1 < y)
 1348                         /* the current rtsc is smaller */
 1349                         return;
 1350                 rtsc->x = x;
 1351                 rtsc->y = y;
 1352                 return;
 1353         }
 1354 
 1355         /*
 1356          * service curve is concave
 1357          * compute the two y values of the current rtsc
 1358          *      y1: at x
 1359          *      y2: at (x + dx)
 1360          */
 1361         y1 = rtsc_x2y(rtsc, x);
 1362         if (y1 <= y) {
 1363                 /* rtsc is below isc, no change to rtsc */
 1364                 return;
 1365         }
 1366 
 1367         y2 = rtsc_x2y(rtsc, x + isc->dx);
 1368         if (y2 >= y + isc->dy) {
 1369                 /* rtsc is above isc, replace rtsc by isc */
 1370                 rtsc->x = x;
 1371                 rtsc->y = y;
 1372                 rtsc->dx = isc->dx;
 1373                 rtsc->dy = isc->dy;
 1374                 return;
 1375         }
 1376 
 1377         /*
 1378          * the two curves intersect
 1379          * compute the offsets (dx, dy) using the reverse
 1380          * function of seg_x2y()
 1381          *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
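                * (with seg_x2y(dx, sm) = dx * sm >> SM_SHIFT this solves to
                *  dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2), computed below)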
 1382          */
 1383         dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
 1384         /*
 1385          * check if (x, y1) belongs to the 1st segment of rtsc.
 1386          * if so, add the offset.
 1387          */ 
 1388         if (rtsc->x + rtsc->dx > x)
 1389                 dx += rtsc->x + rtsc->dx - x;
 1390         dy = seg_x2y(dx, isc->sm1);
 1391 
 1392         rtsc->x = x;
 1393         rtsc->y = y;
 1394         rtsc->dx = dx;
 1395         rtsc->dy = dy;
 1396         return;
 1397 }
 1398 
 1399 /*
 1400  * hfsc device interface
 1401  */
 1402 int
 1403 hfscopen(dev, flag, fmt, p)
 1404         dev_t dev;
 1405         int flag, fmt;
 1406         struct proc *p;
 1407 {
 1408         if (machclk_freq == 0)
 1409                 init_machclk();
 1410 
 1411         if (machclk_freq == 0) {
 1412                 printf("hfsc: no CPU clock available!\n");
 1413                 return (ENXIO);
 1414         }
 1415 
 1416         /* everything will be done when the queueing scheme is attached. */
 1417         return 0;
 1418 }
 1419 
 1420 int
 1421 hfscclose(dev, flag, fmt, p)
 1422         dev_t dev;
 1423         int flag, fmt;
 1424         struct proc *p;
 1425 {
 1426         struct hfsc_if *hif;
 1427         int err, error = 0;
 1428 
 1429         while ((hif = hif_list) != NULL) {
 1430                 /* destroy all */
 1431                 if (ALTQ_IS_ENABLED(hif->hif_ifq))
 1432                         altq_disable(hif->hif_ifq);
 1433 
 1434                 err = altq_detach(hif->hif_ifq);
 1435                 if (err == 0)
 1436                         err = hfsc_detach(hif);
 1437                 if (err != 0 && error == 0)
 1438                         error = err;
 1439         }
 1440 
 1441         return error;
 1442 }
 1443 
 1444 int
 1445 hfscioctl(dev, cmd, addr, flag, p)
 1446         dev_t dev;
 1447         ioctlcmd_t cmd;
 1448         caddr_t addr;
 1449         int flag;
 1450         struct proc *p;
 1451 {
 1452         struct hfsc_if *hif;
 1453         struct hfsc_interface *ifacep;
 1454         int     error = 0;
 1455 
 1456         /* check super-user privilege */
 1457         switch (cmd) {
 1458         case HFSC_GETSTATS:
 1459                 break;
 1460         default:
 1461 #if (__FreeBSD_version > 400000)
 1462                 if ((error = suser(p)) != 0)
 1463                         return (error);
 1464 #else
 1465                 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
 1466                         return (error);
 1467 #endif
 1468                 break;
 1469         }
 1470     
 1471         switch (cmd) {
 1472 
 1473         case HFSC_IF_ATTACH:
 1474                 error = hfsccmd_if_attach((struct hfsc_attach *)addr);
 1475                 break;
 1476 
 1477         case HFSC_IF_DETACH:
 1478                 error = hfsccmd_if_detach((struct hfsc_interface *)addr);
 1479                 break;
 1480 
 1481         case HFSC_ENABLE:
 1482         case HFSC_DISABLE:
 1483         case HFSC_CLEAR_HIERARCHY:
 1484                 ifacep = (struct hfsc_interface *)addr;
 1485                 if ((hif = altq_lookup(ifacep->hfsc_ifname,
 1486                                        ALTQT_HFSC)) == NULL) {
 1487                         error = EBADF;
 1488                         break;
 1489                 }
 1490 
 1491                 switch (cmd) {
 1492 
 1493                 case HFSC_ENABLE:
 1494                         if (hif->hif_defaultclass == NULL) {
 1495 #if 1
 1496                                 printf("hfsc: no default class\n");
 1497 #endif
 1498                                 error = EINVAL;
 1499                                 break;
 1500                         }
 1501                         error = altq_enable(hif->hif_ifq);
 1502                         break;
 1503 
 1504                 case HFSC_DISABLE:
 1505                         error = altq_disable(hif->hif_ifq);
 1506                         break;
 1507 
 1508                 case HFSC_CLEAR_HIERARCHY:
 1509                         hfsc_clear_interface(hif);
 1510                         break;
 1511                 }
 1512                 break;
 1513 
 1514         case HFSC_ADD_CLASS:
 1515                 error = hfsccmd_add_class((struct hfsc_add_class *)addr);
 1516                 break;
 1517 
 1518         case HFSC_DEL_CLASS:
 1519                 error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
 1520                 break;
 1521 
 1522         case HFSC_MOD_CLASS:
 1523                 error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
 1524                 break;
 1525 
 1526         case HFSC_ADD_FILTER:
 1527                 error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
 1528                 break;
 1529 
 1530         case HFSC_DEL_FILTER:
 1531                 error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
 1532                 break;
 1533 
 1534         case HFSC_GETSTATS:
 1535                 error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
 1536                 break;
 1537 
 1538         default:
 1539                 error = EINVAL;
 1540                 break;
 1541         }
 1542         return error;
 1543 }
 1544 
 1545 static int
 1546 hfsccmd_if_attach(ap)
 1547         struct hfsc_attach *ap;
 1548 {
 1549         struct hfsc_if *hif;
 1550         struct ifnet *ifp;
 1551         int error;
 1552         
 1553         if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
 1554                 return (ENXIO);
 1555 
 1556         if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
 1557                 return (ENOMEM);
 1558         
 1559         /*
 1560          * set HFSC to this ifnet structure.
 1561          */
 1562         if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
 1563                                  hfsc_enqueue, hfsc_dequeue, hfsc_request,
 1564                                  &hif->hif_classifier, acc_classify)) != 0)
 1565                 (void)hfsc_detach(hif);
 1566 
 1567         return (error);
 1568 }
 1569 
 1570 static int
 1571 hfsccmd_if_detach(ap)
 1572         struct hfsc_interface *ap;
 1573 {
 1574         struct hfsc_if *hif;
 1575         int error;
 1576 
 1577         if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
 1578                 return (EBADF);
 1579         
 1580         if (ALTQ_IS_ENABLED(hif->hif_ifq))
 1581                 altq_disable(hif->hif_ifq);
 1582 
 1583         if ((error = altq_detach(hif->hif_ifq)))
 1584                 return (error);
 1585 
 1586         return hfsc_detach(hif);
 1587 }
 1588 
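      /*
       * HFSC_ADD_CLASS handler: resolve the parent handle (the root class
       * when HFSC_ROOTCLASS_HANDLE is given), create the new class, and
       * return its handle to the caller.
       */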
 1589 static int
 1590 hfsccmd_add_class(ap)
 1591         struct hfsc_add_class *ap;
 1592 {
 1593         struct hfsc_if *hif;
 1594         struct hfsc_class *cl, *parent;
 1595 
 1596         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1597                 return (EBADF);
 1598 
 1599         if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL) {
 1600                 if (ap->parent_handle == HFSC_ROOTCLASS_HANDLE)
 1601                         parent = hif->hif_rootclass;
 1602                 else
 1603                         return (EINVAL);
 1604         }
 1605         
 1606         if ((cl = hfsc_class_create(hif, &ap->service_curve, parent,
 1607                                     ap->qlimit, ap->flags)) == NULL)
 1608                 return (ENOMEM);
 1609                 
 1610         /* return a class handle to the user */
 1611         ap->class_handle = clp_to_clh(cl);
 1612         return (0);
 1613 }
 1614 
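      /* HFSC_DEL_CLASS handler: resolve the class handle and destroy the class. */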
 1615 static int
 1616 hfsccmd_delete_class(ap)
 1617         struct hfsc_delete_class *ap;
 1618 {
 1619         struct hfsc_if *hif;
 1620         struct hfsc_class *cl;
 1621 
 1622         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1623                 return (EBADF);
 1624 
 1625         if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
 1626                 return (EINVAL);
 1627         
 1628         return hfsc_class_destroy(cl);
 1629 }
 1630 
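      /*
       * HFSC_MOD_CLASS handler: replace the real-time and/or link-sharing
       * service curve of an existing class, as selected by sctype.
       */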
 1631 static int
 1632 hfsccmd_modify_class(ap)
 1633         struct hfsc_modify_class *ap;
 1634 {
 1635         struct hfsc_if *hif;
 1636         struct hfsc_class *cl;
 1637         struct service_curve *rsc = NULL;
 1638         struct service_curve *fsc = NULL;
 1639 
 1640         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1641                 return (EBADF);
 1642 
 1643         if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
 1644                 return (EINVAL);
 1645 
 1646         if (ap->sctype & HFSC_REALTIMESC)
 1647                 rsc = &ap->service_curve;
 1648         if (ap->sctype & HFSC_LINKSHARINGSC)
 1649                 fsc = &ap->service_curve;
 1650 
 1651         return hfsc_class_modify(cl, rsc, fsc);
 1652 }
 1653 
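      /*
       * HFSC_ADD_FILTER handler: attach a classifier filter to a leaf
       * class; filters on interior (parent) classes are rejected.
       */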
 1654 static int
 1655 hfsccmd_add_filter(ap)
 1656         struct hfsc_add_filter *ap;
 1657 {
 1658         struct hfsc_if *hif;
 1659         struct hfsc_class *cl;
 1660 
 1661         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1662                 return (EBADF);
 1663 
 1664         if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
 1665                 return (EINVAL);
 1666 
 1667         if (is_a_parent_class(cl)) {
 1668 #if 1
 1669                 printf("hfsccmd_add_filter: not a leaf class!\n");
 1670 #endif
 1671                 return (EINVAL);
 1672         }
 1673 
 1674         return acc_add_filter(&hif->hif_classifier, &ap->filter,
 1675                               cl, &ap->filter_handle);
 1676 }
 1677 
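      /* HFSC_DEL_FILTER handler: remove a classifier filter by its handle. */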
 1678 static int
 1679 hfsccmd_delete_filter(ap)
 1680         struct hfsc_delete_filter *ap;
 1681 {
 1682         struct hfsc_if *hif;
 1683 
 1684         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1685                 return (EBADF);
 1686 
 1687         return acc_delete_filter(&hif->hif_classifier,
 1688                                  ap->filter_handle);
 1689 }
 1690 
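      /*
       * HFSC_GETSTATS handler: walk the class tree, skip the first nskip
       * classes, then copy out per-class statistics for up to nclasses
       * entries; nclasses is rewritten with the number actually returned.
       */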
 1691 static int
 1692 hfsccmd_class_stats(ap)
 1693         struct hfsc_class_stats *ap;
 1694 {
 1695         struct hfsc_if *hif;
 1696         struct hfsc_class *cl;
 1697         struct hfsc_basic_class_stats stats, *usp;
 1698         int     n, nclasses, error;
 1699         
 1700         if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
 1701                 return (EBADF);
 1702 
 1703         ap->cur_time = read_machclk();
 1704         ap->hif_classes = hif->hif_classes;
 1705         ap->hif_packets = hif->hif_packets;
 1706 
 1707         /* skip the first N classes in the tree */
 1708         nclasses = ap->nskip;
 1709         for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
 1710              cl = hfsc_nextclass(cl), n++)
 1711                 ;
 1712         if (n != nclasses)
 1713                 return (EINVAL);
 1714 
 1715         /* then, read the next N classes in the tree */
 1716         nclasses = ap->nclasses;
 1717         usp = ap->stats;
 1718         for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
 1719 
 1720                 get_class_stats(&stats, cl);
 1721                 
 1722                 if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
 1723                                      sizeof(stats))) != 0)
 1724                         return (error);
 1725         }
 1726 
 1727         ap->nclasses = n;
 1728 
 1729         return (0);
 1730 }
 1731 
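      /* Fill a hfsc_basic_class_stats record from the in-kernel class state. */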
 1732 static void get_class_stats(sp, cl)
 1733         struct hfsc_basic_class_stats *sp;
 1734         struct hfsc_class *cl;
 1735 {
 1736         sp->class_id = cl->cl_id;
 1737         sp->class_handle = clp_to_clh(cl);
 1738 
 1739         if (cl->cl_rsc != NULL) {
 1740                 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
 1741                 sp->rsc.d = dx2d(cl->cl_rsc->dx);
 1742                 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
 1743         } else {
 1744                 sp->rsc.m1 = 0;
 1745                 sp->rsc.d = 0;
 1746                 sp->rsc.m2 = 0;
 1747         }
 1748         if (cl->cl_fsc != NULL) {
 1749                 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
 1750                 sp->fsc.d = dx2d(cl->cl_fsc->dx);
 1751                 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
 1752         } else {
 1753                 sp->fsc.m1 = 0;
 1754                 sp->fsc.d = 0;
 1755                 sp->fsc.m2 = 0;
 1756         }
 1757 
 1758         sp->total = cl->cl_total;
 1759         sp->cumul = cl->cl_cumul;
 1760 
 1761         sp->d = cl->cl_d;
 1762         sp->e = cl->cl_e;
 1763         sp->vt = cl->cl_vt;
 1764 
 1765         sp->qlength = qlen(cl->cl_q);
 1766         sp->xmit_cnt = cl->cl_stats.xmit_cnt;
 1767         sp->drop_cnt = cl->cl_stats.drop_cnt;
 1768         sp->period = cl->cl_stats.period;
 1769 
 1770         sp->qtype = qtype(cl->cl_q);
 1771 #ifdef ALTQ_RED
 1772         if (q_is_red(cl->cl_q))
 1773                 red_getstats(cl->cl_red, &sp->red[0]);
 1774 #endif
 1775 #ifdef ALTQ_RIO
 1776         if (q_is_rio(cl->cl_q))
 1777                 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
 1778 #endif
 1779 }
 1780 
 1781 /* convert a class handle to the corresponding class pointer */
 1782 static struct hfsc_class *
 1783 clh_to_clp(hif, chandle)
 1784         struct hfsc_if *hif;
 1785         u_long chandle;
 1786 {
 1787         struct hfsc_class *cl;
 1788 
 1789         cl = (struct hfsc_class *)chandle;
 1790         if (chandle != ALIGN(cl)) {
 1791 #if 1
 1792                 printf("clh_to_clp: unaligned pointer %p\n", cl);
 1793 #endif
 1794                 return (NULL);
 1795         }
 1796 
 1797         if (cl == NULL || cl->cl_handle != chandle || cl->cl_hif != hif)
 1798                 return (NULL);
 1799 
 1800         return (cl);
 1801 }
 1802 
 1803 /* convert a class pointer to the corresponding class handle */
 1804 static u_long
 1805 clp_to_clh(cl)
 1806         struct hfsc_class *cl;
 1807 {
 1808         if (cl->cl_parent == NULL)
 1809                 return (HFSC_ROOTCLASS_HANDLE);  /* XXX */
 1810         return (cl->cl_handle);
 1811 }
 1812 
 1813 #ifdef KLD_MODULE
 1814 
 1815 static struct altqsw hfsc_sw =
 1816         {"hfsc", hfscopen, hfscclose, hfscioctl};
 1817 
 1818 ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
 1819 
 1820 #endif /* KLD_MODULE */
 1821 
 1822 #endif /* ALTQ_HFSC */
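
For context, the ioctl commands dispatched above (HFSC_IF_ATTACH, HFSC_ADD_CLASS, HFSC_ENABLE, and so on) are issued from userland against the HFSC character device. The fragment below is a minimal illustrative sketch, not part of this file: the device path (/dev/altq/hfsc), the header locations, and the bits-per-second unit of the bandwidth fields are assumptions, while the structure fields (iface.hfsc_ifname, bandwidth, parent_handle, service_curve, qlimit, flags, class_handle) are those the handlers above dereference.

/*
 * Illustrative userland sketch (not part of altq_hfsc.c): attach HFSC to an
 * interface and create one leaf class under the root.  Device node path and
 * bandwidth units are assumptions; structure fields match the handlers above.
 */
#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct hfsc_attach attach;
        struct hfsc_add_class add;
        int fd;

        /* Assumed device node for the HFSC discipline. */
        if ((fd = open("/dev/altq/hfsc", O_RDWR)) < 0) {
                perror("open");
                return (1);
        }

        /* HFSC_IF_ATTACH is handled by hfsccmd_if_attach() above. */
        memset(&attach, 0, sizeof(attach));
        strlcpy(attach.iface.hfsc_ifname, "fxp0", sizeof(attach.iface.hfsc_ifname));
        attach.bandwidth = 100000000;           /* assumed bits per second */
        if (ioctl(fd, HFSC_IF_ATTACH, &attach) < 0)
                perror("HFSC_IF_ATTACH");

        /* HFSC_ADD_CLASS is handled by hfsccmd_add_class() above. */
        memset(&add, 0, sizeof(add));
        strlcpy(add.iface.hfsc_ifname, "fxp0", sizeof(add.iface.hfsc_ifname));
        add.parent_handle = HFSC_ROOTCLASS_HANDLE;
        add.service_curve.m1 = 0;               /* no initial burst segment */
        add.service_curve.d = 0;
        add.service_curve.m2 = 10000000;        /* 10 Mbps steady-state rate */
        add.qlimit = 50;
        add.flags = 0;
        if (ioctl(fd, HFSC_ADD_CLASS, &add) < 0)
                perror("HFSC_ADD_CLASS");
        else
                printf("new class handle: %#lx\n", (u_long)add.class_handle);

        close(fd);
        return (0);
}

Once classes and filters exist, HFSC_ENABLE (which, as in the dispatcher above, takes a struct hfsc_interface naming the interface) turns the discipline on.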
