/*-
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Network Research
 *	Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 *
 * @(#)rm_class.c  1.48  97/12/05 SMI
 * $KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $
 * $FreeBSD$
 */
#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>

#include <net/altq/if_altq.h>
#include <net/altq/altq.h>
#include <net/altq/altq_codel.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

/*
 * Local Macros
 */
#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, mbuf_t *);
static void	_rmc_dropq(rm_class_t *);
static mbuf_t	*_rmc_getq(rm_class_t *);
static mbuf_t	*_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(void *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class borrowing from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now heuristics for setting the top-level variable (cutoff_) becomes:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
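 *
 * illustrative walk-through (tree shape assumed here, not taken from the
 * original comment): with the root at depth 2, an interior class at
 * depth 1 and leaves at depth 0, cutoff_ starts at RM_MAXDEPTH so any
 * class may borrow.  a packet arriving for an underlimit leaf sets
 * cutoff_ to 0 (rule 1), disabling all borrowing.  had the leaf instead
 * been overlimit with its depth-1 parent underlimit while cutoff_ was
 * still large, rule 2 would set cutoff_ to 1.  with cutoff_ at 0, an
 * overlimit leaf cannot send; once the scheduler finds no underlimit
 * class, rule 3 bumps cutoff_ to 1 and the leaf may borrow from its
 * parent.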
 */

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte	is the data rate of the interface in nanoseconds/byte.
 *		E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *		than 100% of the bandwidth, this number should be the
 *		'effective' rate for the class.  Let f be the
 *		bandwidth fraction allocated to this class, and let
 *		nsPerByte be the data rate of the output link in
 *		nanoseconds/byte.  Then nsecPerByte is set to
 *		nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *		for a class that gets 50% of an ethernet's bandwidth.
 *
 * action	the routine to call when the class is over limit.
 *
 * maxq		max allowable queue size for class (in packets).
 *
 * parent	parent class pointer.
 *
 * borrow	class to borrow from (should be either 'parent' or null).
 *
 * maxidle	max value allowed for class 'idle' time estimate (this
 *		parameter determines how large an initial burst of packets
 *		can be before overlimit action is invoked).
 *
 * offtime	how long 'delay' action will delay when class goes over
 *		limit (this parameter determines the steady-state burst
 *		size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *	ptime = s * nsPerByte * (1 - f) / f
 *	maxidle = ptime * (1 - g^b) / g^b
 *	minidle = -ptime * (1 / (f - 1))
 *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
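 *
 * worked example (all numbers assumed here for illustration): for a
 * 10Mb/s link (nsPerByte = 800) with f = .5 (nsecPerByte = 1600),
 * average packet size s = 1000 bytes, burst b = 16 and an assumed
 * RM_FILTER_GAIN of 5 (g = 1 - 2^-5 = .96875, so g^16 is about .602):
 *
 *	ptime   = 1000 * 800 * (1 - .5) / .5  = 800000 ns
 *	maxidle = 800000 * (1 - .602) / .602  = about 529600 ns
 *
 * and the scaled value passed to this routine would be roughly
 *
 *	maxidle = 529600 * 2^5 * 8 / (1000 * 1600) = about 85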
 */
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class	*cl;
	struct rm_class	*peer;
	int		 s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_CODEL
	if (flags & RMCF_CODEL) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: CODEL not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);
	CALLOUT_INIT(&cl->callout_);
	cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->q_ == NULL) {
		free(cl, M_DEVBUF);
		return (NULL);
	}

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
		if (flags & RMCF_FLOWVALVE)
			red_flags |= REDF_FLOWVALVE;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (flags & RMCF_CODEL) {
		cl->codel_ = codel_alloc(5, 100, 0); /* 5ms target, 100ms interval, no ECN */
		if (cl->codel_ != NULL)
			qtype(cl->q_) = Q_CODEL;
	}
#endif

	/*
	 * put the class into the class tree
	 */
	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat	*ifd;
	u_int		 old_allotment;
	int		 s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round-robin weights for the CBQ weighted round-robin
 *	algorithm.
 *
 * Returns:	NONE
 */

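/*
 * Illustrative numbers (assumed here, not from the original source): two
 * classes at the same priority with allotments of 625000 and 312500
 * bytes/sec (5Mb/s and 2.5Mb/s) on an interface with maxpkt_ = 1500 give
 *
 *	M_[i] = (625000 + 312500) / (2 * 1500) = 312
 *	w_allotment_ = 625000 / 312 = 2003  and  312500 / 312 = 1001
 *
 * so per WRR round the first class may send roughly twice the bytes of
 * the second, matching the 2:1 ratio of their allotments.
 */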
static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int		 i;
	struct rm_class	*cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
			    (ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
					    ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 * Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t	*t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 * Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t	*p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t	*t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 * Returns:	NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class	*p, *head, *previous;
	int		 s;

	ASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);

	s = splnet();
	IFQ_LOCK(ifd->ifq_);
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *	re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			ASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			ASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	IFQ_UNLOCK(ifd->ifq_);
	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->q_))
			codel_destroy(cl->codel_);
#endif
	}
	free(cl->q_, M_DEVBUF);
	free(cl, M_DEVBUF);
}

/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 * Returns:	NONE
 */

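/*
 * Illustrative call (all values assumed here, not prescriptive): a
 * driver for a 10Mb/s link that allows one outstanding packet might use
 *
 *	rmc_init(ifq, &ifd, 800, my_restart, 50, 1, maxidle, minidle,
 *	    offtime, RMCF_WRR);
 *
 * where my_restart is a hypothetical driver routine and maxidle, minidle
 * and offtime are scaled as described above rmc_newclass().
 */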
void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int	i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero((char *)ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	if ((ifd->root_ = rmc_newclass(0, ifd,
	    nsecPerByte,
	    rmc_root_overlimit, maxq, 0, 0,
	    maxidle, minidle, offtime,
	    0, 0)) == NULL) {
		printf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 * Returns:	0 on successful queueing
 *		-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
{
	struct timeval	 now;
	struct rm_ifdat	*ifd = cl->ifdat_;
	int		 cpri = cl->pri_;
	int		 is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			    borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
				    cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int		 i;
	rm_class_t	*p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p = cl;
	rm_class_t	*top;
	struct rm_ifdat	*ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		CALLOUT_STOP(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static mbuf_t *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class	*cl = NULL, *first = NULL;
	u_int		 deficit;
	int		 cpri;
	mbuf_t		*m;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	}
	else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
1002 * "M[cl->pri_])" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static mbuf_t *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	mbuf_t		*m;
	int		 cpri;
	struct rm_class	*cl, *first = NULL;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * mbuf_t *
 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 * Returns:	NULL, if a packet is not available or if all
 *		classes are overlimit.
 *
 *		Otherwise, Pointer to the next packet.
 */

mbuf_t *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
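/*
 * sanity check of the approximation (arithmetic added for illustration):
 * 1/1024 + 1/65536 + 1/131072 = .0009765625 + .0000152588 + .0000076294
 * = .0009994507, so NSEC_TO_USEC(t) is roughly t / 1000.55.
 */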
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int		 idle, avgidle, pktlen;
	int		 pkt_time, tidle;
	rm_class_t	*cl, *borrowed;
	rm_class_t	*borrows;
	struct timeval	*nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rmc_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1  /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1  /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
			    (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				CALLOUT_STOP(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 * Returns:	NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	ASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

#if (__FreeBSD_version > 300000)
/* hzto() is removed from FreeBSD-3.0 */
static int hzto(struct timeval *);

static int
hzto(tv)
	struct timeval *tv;
{
	struct timeval t2;

	getmicrotime(&t2);
	t2.tv_sec = tv->tv_sec - t2.tv_sec;
	t2.tv_usec = tv->tv_usec - t2.tv_usec;
	return (tvtohz(&t2));
}
#endif /* __FreeBSD_version > 300000 */

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being overlimit,
 *	then delay for the offtime of the borrowing class that is overlimit.
 *
 * Returns:	NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int	delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > tick * 2) {
			/* FreeBSD rounds up the tick */
			t = hzto(&cl->undertime_);
		} else
			t = 2;
		CALLOUT_RESET(&cl->callout_, t, rmc_restart, cl);
	}
}

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has
 *	to know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 * Returns:	NONE
 */

static void
rmc_restart(void *arg)
{
	struct rm_class	*cl = arg;
	struct rm_ifdat	*ifd = cl->ifdat_;
	struct epoch_tracker et;
	int		 s;

	s = splnet();
	NET_EPOCH_ENTER(et);
	IFQ_LOCK(ifd->ifq_);
	CURVNET_SET(ifd->ifq_->altq_ifp->if_vnet);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	CURVNET_RESTORE();
	IFQ_UNLOCK(ifd->ifq_);
	NET_EPOCH_EXIT(et);
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link sharing structure.
 *
 * Returns:	NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 * effects on the code of whether queues are red queues or droptail
 * queues.
 */

static int
_rmc_addq(rm_class_t *cl, mbuf_t *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->q_))
		return codel_addq(cl->codel_, cl->q_, m);
#endif

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	mbuf_t	*m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static mbuf_t *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->q_))
		return codel_getq(cl->codel_, cl->q_);
#endif
	return _getq(cl->q_);
}

static mbuf_t *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE

struct cbqtrace		 cbqtrace_buffer[NCBQTRACE+1];
struct cbqtrace		*cbqtrace_ptr = NULL;
int			 cbqtrace_count;

/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void	*func;
	char	*name;
} rmc_funcs[] =
{
	rmc_init,		"rmc_init",
	rmc_queue_packet,	"rmc_queue_packet",
	rmc_under_limit,	"rmc_under_limit",
	rmc_update_class_util,	"rmc_update_class_util",
	rmc_delay_action,	"rmc_delay_action",
	rmc_restart,		"rmc_restart",
	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
	NULL,			NULL
};

static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++)
		if (fp->func == func)
			return (fp->name);
	return ("unknown");
}

void
cbqtrace_dump(int counter)
{
	int	 i, *p;
	char	*cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */

#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || \
    defined(ALTQ_HFSC) || defined(ALTQ_PRIQ) || defined(ALTQ_CODEL)
#if !defined(__GNUC__) || defined(ALTQ_DEBUG)

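/*
 * The queue primitives below keep each class_queue_t as a circular,
 * singly-linked list threaded through m_nextpkt: qtail(q) points at the
 * last packet and qtail(q)->m_nextpkt at the head, so both enqueue at
 * the tail and dequeue at the head are O(1).
 */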
void
_addq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0;

	if ((m0 = qtail(q)) != NULL)
		m->m_nextpkt = m0->m_nextpkt;
	else
		m0 = m;
	m0->m_nextpkt = m;
	qtail(q) = m;
	qlen(q)++;
}

mbuf_t *
_getq(class_queue_t *q)
{
	mbuf_t	*m, *m0;

	if ((m = qtail(q)) == NULL)
		return (NULL);
	if ((m0 = m->m_nextpkt) != m)
		m->m_nextpkt = m0->m_nextpkt;
	else {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	}
	qlen(q)--;
	m0->m_nextpkt = NULL;
	return (m0);
}

/* drop a packet at the tail of the queue */
mbuf_t *
_getq_tail(class_queue_t *q)
{
	mbuf_t	*m, *m0, *prev;

	if ((m = m0 = qtail(q)) == NULL)
		return NULL;
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else
		qtail(q) = prev;
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

/* randomly select a packet in the queue */
mbuf_t *
_getq_random(class_queue_t *q)
{
	struct mbuf	*m;
	int		 i, n;

	if ((m = qtail(q)) == NULL)
		return NULL;
	if (m->m_nextpkt == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else {
		struct mbuf *prev = NULL;

		n = arc4random() % qlen(q) + 1;
		for (i = 0; i < n; i++) {
			prev = m;
			m = m->m_nextpkt;
		}
		prev->m_nextpkt = m->m_nextpkt;
		if (m == qtail(q))
			qtail(q) = prev;
	}
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

void
_removeq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0, *prev;

	m0 = qtail(q);
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m)
		qtail(q) = NULL;
	else if (qtail(q) == m)
		qtail(q) = prev;
	qlen(q)--;
}

void
_flushq(class_queue_t *q)
{
	mbuf_t	*m;

	while ((m = _getq(q)) != NULL)
		m_freem(m);
	ASSERT(qlen(q) == 0);
}

#endif /* !__GNUC__ || ALTQ_DEBUG */
#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ || ALTQ_CODEL */