1 /* $NetBSD: altq_rmclass.c,v 1.8 2003/11/09 22:11:12 christos Exp $ */
2 /* $KAME: altq_rmclass.c,v 1.9 2000/12/14 08:12:46 thorpej Exp $ */
3
4 /*
5 * Copyright (c) 1991-1997 Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the Network Research
19 * Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: altq_rmclass.c,v 1.8 2003/11/09 22:11:12 christos Exp $");
42
43 #ident "@(#)rm_class.c 1.48 97/12/05 SMI"
44
45 #if defined(__FreeBSD__) || defined(__NetBSD__)
46 #include "opt_altq.h"
47 #if (__FreeBSD__ != 2)
48 #include "opt_inet.h"
49 #ifdef __FreeBSD__
50 #include "opt_inet6.h"
51 #endif
52 #endif
53 #endif /* __FreeBSD__ || __NetBSD__ */
54 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
55
56 #include <sys/param.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/socket.h>
60 #include <sys/systm.h>
61 #include <sys/errno.h>
62 #include <sys/time.h>
63 #include <sys/kernel.h>
64
65 #include <net/if.h>
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/ip.h>
69
70 #include <altq/altq.h>
71 #include <altq/altq_rmclass.h>
72 #include <altq/altq_rmclass_debug.h>
73 #include <altq/altq_red.h>
74 #include <altq/altq_rio.h>
75
76 #ifdef CBQ_TRACE
77 struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
78 struct cbqtrace *cbqtrace_ptr = NULL;
79 int cbqtrace_count;
80 #endif
81
82 /*
83 * Local Macros
84 */
85
86 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
87
88 /*
89 * Local routines.
90 */
91
92 static int rmc_satisfied __P((struct rm_class *, struct timeval *));
93 static void rmc_wrr_set_weights __P((struct rm_ifdat *));
94 static void rmc_depth_compute __P((struct rm_class *));
95 static void rmc_depth_recompute __P((rm_class_t *));
96
97 static mbuf_t *_rmc_wrr_dequeue_next __P((struct rm_ifdat *, int));
98 static mbuf_t *_rmc_prr_dequeue_next __P((struct rm_ifdat *, int));
99
100 static int _rmc_addq __P((rm_class_t *, mbuf_t *));
101 static void _rmc_dropq __P((rm_class_t *));
102 static mbuf_t *_rmc_getq __P((rm_class_t *));
103 static mbuf_t *_rmc_pollq __P((rm_class_t *));
104
105 static int rmc_under_limit __P((struct rm_class *, struct timeval *));
106 static void rmc_tl_satisfied __P((struct rm_ifdat *, struct timeval *));
107 static void rmc_drop_action __P((struct rm_class *));
108 static void rmc_restart __P((struct rm_class *));
109 static void rmc_root_overlimit __P((struct rm_class *, struct rm_class *));
110
111 /*
112 * BORROW_OFFTIME (experimental):
113 * borrow the offtime of the class being borrowed from.
114 * the reason is that when its own offtime is set, the class is unable
115 * to borrow much, especially when cutoff is taking effect.
116 * but when the borrowed class is overloaded (avgidle is close to minidle),
117 * use the borrowing class's offtime to avoid overload.
118 */
119 /*
120 * ADJUST_CUTOFF (experimental):
121 * if no underlimit class is found due to cutoff, increase cutoff and
122 * retry the scheduling loop.
123 * also, don't invoke delay_actions while cutoff is taking effect,
124 * since a sleeping class won't have a chance to be scheduled in the
125 * next loop.
126 *
127 * the heuristics for setting the top-level variable (cutoff_) become:
128 * 1. if a packet arrives for a not-overlimit class, set cutoff
129 * to the depth of the class.
130 * 2. if cutoff is i, and a packet arrives for an overlimit class
131 * with an underlimit ancestor at a lower level than i (say j),
132 * then set cutoff to j.
133 * 3. at scheduling a packet, if there is no underlimit class
134 * due to the current cutoff level, increase cutoff by 1 and
135 * then try to schedule again.
136 */
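/*
 * (in this file, heuristics 1 and 2 above are implemented in
 * rmc_queue_packet(), and heuristic 3 is the ADJUST_CUTOFF retry loop
 * in _rmc_wrr_dequeue_next() and _rmc_prr_dequeue_next().)
 */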
137
138 /*
139 * rm_class_t *
140 * rmc_newclass(...) - Create a new resource management class at priority
141 * 'pri' on the interface given by 'ifd'.
142 *
143 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
144 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
145 * than 100% of the bandwidth, this number should be the
146 * 'effective' rate for the class. Let f be the
147 * bandwidth fraction allocated to this class, and let
148 * nsPerByte be the data rate of the output link in
149 * nanoseconds/byte. Then nsecPerByte is set to
150 * nsPerByte / f. E.g., 1600 (= 800 / .5)
151 * for a class that gets 50% of an ethernet's bandwidth.
152 *
153 * action the routine to call when the class is over limit.
154 *
155 * maxq max allowable queue size for class (in packets).
156 *
157 * parent parent class pointer.
158 *
159 * borrow class to borrow from (should be either 'parent' or null).
160 *
161 * maxidle max value allowed for class 'idle' time estimate (this
162 * parameter determines how large an initial burst of packets
163 * can be before the overlimit action is invoked).
164 *
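 * minidle minimum (negative) value allowed for the class 'idle'
 * time estimate (this bounds the penalty an overlimit,
 * borrowing class can accumulate; see the avgidle clamp
 * in rmc_update_class_util).
 *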
165 * offtime how long 'delay' action will delay when class goes over
166 * limit (this parameter determines the steady-state burst
167 * size when a class is running over its limit).
168 *
169 * Maxidle and offtime have to be computed from the following: If the
170 * average packet size is s, the bandwidth fraction allocated to this
171 * class is f, we want to allow b packet bursts, and the gain of the
172 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
173 *
174 * ptime = s * nsPerByte * (1 - f) / f
175 * maxidle = ptime * (1 - g^b) / g^b
176 * minidle = -ptime * (1 / (f - 1))
177 * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
178 *
179 * Operationally, it's convenient to specify maxidle & offtime in units
180 * independent of the link bandwidth so the maxidle & offtime passed to
181 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
182 * (The constant factor is a scale factor needed to make the parameters
183 * integers. This scaling also means that the 'unscaled' values of
184 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
185 * not nanoseconds.) Also note that the 'idle' filter computation keeps
186 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
187 * maxidle also must be scaled upward by this value. Thus, the passed
188 * values for maxidle and offtime can be computed as follows:
189 *
190 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
191 * offtime = offtime * 8 / (1000 * nsecPerByte)
192 *
193 * When USE_HRTIME is employed, then maxidle and offtime become:
194 * maxidle = maxidle * (8.0 / nsecPerByte);
195 * offtime = offtime * (8.0 / nsecPerByte);
196 */
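/*
 * Illustrative sketch only (not part of the original code): one way the
 * scaled maxidle/offtime arguments could be derived from the formulas
 * above.  The helper name cbq_compute_params, the floating-point math,
 * and libm's pow() are assumptions for exposition; this computation
 * belongs in a userland configuration tool, never in the kernel, hence
 * the "notdef" guard.
 */
#ifdef notdef
#include <math.h>			/* pow(); userland only */

static void
cbq_compute_params(double s, double f, double nsPerByte, int b,
    u_int *maxidlep, u_int *offtimep)
{
	double nsecPerByte = nsPerByte / f;	/* effective class rate */
	double g = 1.0 - 1.0 / (double)(1 << RM_FILTER_GAIN);
	double ptime = s * nsPerByte * (1.0 - f) / f;
	double gtob = pow(g, (double)b);
	double gtob1 = pow(g, (double)(b - 1));
	double maxidle = ptime * (1.0 - gtob) / gtob;
	double offtime = ptime *
	    (1.0 + 1.0 / (1.0 - g) * (1.0 - gtob1) / gtob1);

	/* rescale to the link-independent units described above */
	*maxidlep = (u_int)(maxidle * (1 << RM_FILTER_GAIN) * 8.0 /
	    (1000.0 * nsecPerByte));
	*offtimep = (u_int)(offtime * 8.0 / (1000.0 * nsecPerByte));
}
#endif /* notdef */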
197
198 struct rm_class *
199 rmc_newclass(pri, ifd, nsecPerByte, action, maxq, parent, borrow,
200 maxidle, minidle, offtime, pktsize, flags)
201 int pri;
202 struct rm_ifdat *ifd;
203 u_int nsecPerByte;
204 void (*action)(rm_class_t *, rm_class_t *);
205 int maxq;
206 struct rm_class *parent;
207 struct rm_class *borrow;
208 u_int maxidle;
209 int minidle;
210 u_int offtime;
211 int pktsize;
212 int flags;
213 {
214 struct rm_class *cl;
215 struct rm_class *peer;
216 int s;
217
218 if (pri >= RM_MAXPRIO)
219 return (NULL);
220 #ifndef ALTQ_RED
221 if (flags & RMCF_RED) {
222 printf("rmc_newclass: RED not configured for CBQ!\n");
223 return (NULL);
224 }
225 #endif
226 #ifndef ALTQ_RIO
227 if (flags & RMCF_RIO) {
228 printf("rmc_newclass: RIO not configured for CBQ!\n");
229 return (NULL);
230 }
231 #endif
232
233 MALLOC(cl, struct rm_class *, sizeof(struct rm_class),
234 M_DEVBUF, M_WAITOK);
235 if (cl == NULL)
236 return (NULL);
237 (void)memset(cl, 0, sizeof(struct rm_class));
238 CALLOUT_INIT(&cl->callout_);
239 MALLOC(cl->q_, class_queue_t *, sizeof(class_queue_t),
240 M_DEVBUF, M_WAITOK);
241 if (cl->q_ == NULL) {
242 FREE(cl, M_DEVBUF);
243 return (NULL);
244 }
245 (void)memset(cl->q_, 0, sizeof(class_queue_t));
246
247 /*
248 * Class initialization.
249 */
250 cl->children_ = NULL;
251 cl->parent_ = parent;
252 cl->borrow_ = borrow;
253 cl->leaf_ = 1;
254 cl->ifdat_ = ifd;
255 cl->pri_ = pri;
256 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
257 cl->depth_ = 0;
258 cl->qthresh_ = 0;
259 cl->ns_per_byte_ = nsecPerByte;
260
261 qlimit(cl->q_) = maxq;
262 qtype(cl->q_) = Q_DROPHEAD;
263 qlen(cl->q_) = 0;
264 cl->flags_ = flags;
265
266 #if 1 /* minidle is also scaled in ALTQ */
267 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
268 if (cl->minidle_ > 0)
269 cl->minidle_ = 0;
270 #else
271 cl->minidle_ = minidle;
272 #endif
273 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
274 if (cl->maxidle_ == 0)
275 cl->maxidle_ = 1;
276 #if 1 /* offtime is also scaled in ALTQ */
277 cl->avgidle_ = cl->maxidle_;
278 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
279 if (cl->offtime_ == 0)
280 cl->offtime_ = 1;
281 #else
282 cl->avgidle_ = 0;
283 cl->offtime_ = (offtime * nsecPerByte) / 8;
284 #endif
285 cl->overlimit = action;
286
287 #ifdef ALTQ_RED
288 if (flags & (RMCF_RED|RMCF_RIO)) {
289 int red_flags, red_pkttime;
290
291 red_flags = 0;
292 if (flags & RMCF_ECN)
293 red_flags |= REDF_ECN;
294 if (flags & RMCF_FLOWVALVE)
295 red_flags |= REDF_FLOWVALVE;
296 #ifdef ALTQ_RIO
297 if (flags & RMCF_CLEARDSCP)
298 red_flags |= RIOF_CLEARDSCP;
299 #endif
300 red_pkttime = nsecPerByte * pktsize / 1000;
301
302 if (flags & RMCF_RED) {
303 cl->red_ = red_alloc(0, 0, 0, 0,
304 red_flags, red_pkttime);
305 if (cl->red_ != NULL)
306 qtype(cl->q_) = Q_RED;
307 }
308 #ifdef ALTQ_RIO
309 else {
310 cl->red_ = (red_t *)rio_alloc(0, NULL,
311 red_flags, red_pkttime);
312 if (cl->red_ != NULL)
313 qtype(cl->q_) = Q_RIO;
314 }
315 #endif
316 }
317 #endif /* ALTQ_RED */
318
319 /*
320 * put the class into the class tree
321 */
322 s = splnet();
323 if ((peer = ifd->active_[pri]) != NULL) {
324 /* find the last class at this pri */
325 cl->peer_ = peer;
326 while (peer->peer_ != ifd->active_[pri])
327 peer = peer->peer_;
328 peer->peer_ = cl;
329 } else {
330 ifd->active_[pri] = cl;
331 cl->peer_ = cl;
332 }
333
334 if (cl->parent_) {
335 cl->next_ = parent->children_;
336 parent->children_ = cl;
337 parent->leaf_ = 0;
338 }
339
340 /*
341 * Compute the depth of this class and its ancestors in the class
342 * hierarchy.
343 */
344 rmc_depth_compute(cl);
345
346 /*
347 * If CBQ's WRR is enabled, then initialize the class WRR state.
348 */
349 if (ifd->wrr_) {
350 ifd->num_[pri]++;
351 ifd->alloc_[pri] += cl->allotment_;
352 rmc_wrr_set_weights(ifd);
353 }
354 splx(s);
355 return (cl);
356 }
357
358 int
359 rmc_modclass(cl, nsecPerByte, maxq, maxidle, minidle, offtime, pktsize)
360 struct rm_class *cl;
361 u_int nsecPerByte;
362 int maxq;
363 u_int maxidle;
364 int minidle;
365 u_int offtime;
366 int pktsize;
367 {
368 struct rm_ifdat *ifd;
369 u_int old_allotment;
370 int s;
371
372 ifd = cl->ifdat_;
373 old_allotment = cl->allotment_;
374
375 s = splnet();
376 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
377 cl->qthresh_ = 0;
378 cl->ns_per_byte_ = nsecPerByte;
379
380 qlimit(cl->q_) = maxq;
381
382 #if 1 /* minidle is also scaled in ALTQ */
383 cl->minidle_ = (minidle * nsecPerByte) / 8;
384 if (cl->minidle_ > 0)
385 cl->minidle_ = 0;
386 #else
387 cl->minidle_ = minidle;
388 #endif
389 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
390 if (cl->maxidle_ == 0)
391 cl->maxidle_ = 1;
392 #if 1 /* offtime is also scaled in ALTQ */
393 cl->avgidle_ = cl->maxidle_;
394 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
395 if (cl->offtime_ == 0)
396 cl->offtime_ = 1;
397 #else
398 cl->avgidle_ = 0;
399 cl->offtime_ = (offtime * nsecPerByte) / 8;
400 #endif
401
402 /*
403 * If CBQ's WRR is enabled, then initialize the class WRR state.
404 */
405 if (ifd->wrr_) {
406 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
407 rmc_wrr_set_weights(ifd);
408 }
409 splx(s);
410 return (0);
411 }
412
413 /*
414 * static void
415 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
416 * the appropriate round-robin weights for the CBQ weighted round-robin
417 * algorithm.
418 *
419 * Returns: NONE
420 */
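/*
 * Worked example: if two classes at priority i have equal allotments A,
 * then M_[i] = 2A / (2 * maxpkt_) = A / maxpkt_, so each class's
 * w_allotment_ becomes A / M_[i] = maxpkt_, i.e. one maximum-sized
 * packet's worth of bytes per round.
 */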
421
422 static void
423 rmc_wrr_set_weights(ifd)
424 struct rm_ifdat *ifd;
425 {
426 int i;
427 struct rm_class *cl, *clh;
428
429 for (i = 0; i < RM_MAXPRIO; i++) {
430 /*
431 * This is inverted from that of the simulator to
432 * maintain precision.
433 */
434 if (ifd->num_[i] == 0)
435 ifd->M_[i] = 0;
436 else
437 ifd->M_[i] = ifd->alloc_[i] /
438 (ifd->num_[i] * ifd->maxpkt_);
439 /*
440 * Compute the weighted allotment for each class.
441 * This takes the expensive div instruction out
442 * of the main loop for the wrr scheduling path.
443 * These only get recomputed when a class comes or
444 * goes.
445 */
446 if (ifd->active_[i] != NULL) {
447 clh = cl = ifd->active_[i];
448 do {
449 /* safe-guard for slow link or alloc_ == 0 */
450 if (ifd->M_[i] == 0)
451 cl->w_allotment_ = 0;
452 else
453 cl->w_allotment_ = cl->allotment_ /
454 ifd->M_[i];
455 cl = cl->peer_;
456 } while ((cl != NULL) && (cl != clh));
457 }
458 }
459 }
460
461 int
462 rmc_get_weight(ifd, pri)
463 struct rm_ifdat *ifd;
464 int pri;
465 {
466 if ((pri >= 0) && (pri < RM_MAXPRIO))
467 return (ifd->M_[pri]);
468 else
469 return (0);
470 }
471
472 /*
473 * static void
474 * rmc_depth_compute(struct rm_class *cl) - This function computes the
475 * appropriate depth of class 'cl' and its ancestors.
476 *
477 * Returns: NONE
478 */
479
480 static void
481 rmc_depth_compute(cl)
482 struct rm_class *cl;
483 {
484 rm_class_t *t = cl, *p;
485
486 /*
487 * Recompute the depth for the branch of the tree.
488 */
489 while (t != NULL) {
490 p = t->parent_;
491 if (p && (t->depth_ >= p->depth_)) {
492 p->depth_ = t->depth_ + 1;
493 t = p;
494 } else
495 t = NULL;
496 }
497 }
498
499 /*
500 * static void
501 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
502 * the depth of the tree after a class has been deleted.
503 *
504 * Returns: NONE
505 */
506
507 static void
508 rmc_depth_recompute(rm_class_t *cl)
509 {
510 #if 1 /* ALTQ */
511 rm_class_t *p, *t;
512
513 p = cl;
514 while (p != NULL) {
515 if ((t = p->children_) == NULL) {
516 p->depth_ = 0;
517 } else {
518 int cdepth = 0;
519
520 while (t != NULL) {
521 if (t->depth_ > cdepth)
522 cdepth = t->depth_;
523 t = t->next_;
524 }
525
526 if (p->depth_ == cdepth + 1)
527 /* no change to this parent */
528 return;
529
530 p->depth_ = cdepth + 1;
531 }
532
533 p = p->parent_;
534 }
535 #else
536 rm_class_t *t;
537
538 if (cl->depth_ >= 1) {
539 if (cl->children_ == NULL) {
540 cl->depth_ = 0;
541 } else if ((t = cl->children_) != NULL) {
542 while (t != NULL) {
543 if (t->children_ != NULL)
544 rmc_depth_recompute(t);
545 t = t->next_;
546 }
547 } else
548 rmc_depth_compute(cl);
549 }
550 #endif
551 }
552
553 /*
554 * void
555 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
556 * function deletes a class from the link-sharing structure and frees
557 * all resources associated with the class.
558 *
559 * Returns: NONE
560 */
561
562 void
563 rmc_delete_class(ifd, cl)
564 struct rm_ifdat *ifd;
565 struct rm_class *cl;
566 {
567 struct rm_class *p, *head, *previous;
568 int s;
569
570 ASSERT(cl->children_ == NULL);
571
572 if (cl->sleeping_)
573 CALLOUT_STOP(&cl->callout_);
574
575 s = splnet();
576 /*
577 * Free packets in the packet queue.
578 * XXX - this may not be a desired behavior. Packets should be
579 * re-queued.
580 */
581 rmc_dropall(cl);
582
583 /*
584 * If the class has a parent, then remove the class from its
585 * parent's children chain.
586 */
587 if (cl->parent_ != NULL) {
588 head = cl->parent_->children_;
589 p = previous = head;
590 if (head->next_ == NULL) {
591 ASSERT(head == cl);
592 cl->parent_->children_ = NULL;
593 cl->parent_->leaf_ = 1;
594 } else while (p != NULL) {
595 if (p == cl) {
596 if (cl == head)
597 cl->parent_->children_ = cl->next_;
598 else
599 previous->next_ = cl->next_;
600 cl->next_ = NULL;
601 p = NULL;
602 } else {
603 previous = p;
604 p = p->next_;
605 }
606 }
607 }
608
609 /*
610 * Delete class from class priority peer list.
611 */
612 if ((p = ifd->active_[cl->pri_]) != NULL) {
613 /*
614 * If there is more than one member of this priority
615 * level, then look for class(cl) in the priority level.
616 */
617 if (p != p->peer_) {
618 while (p->peer_ != cl)
619 p = p->peer_;
620 p->peer_ = cl->peer_;
621
622 if (ifd->active_[cl->pri_] == cl)
623 ifd->active_[cl->pri_] = cl->peer_;
624 } else {
625 ASSERT(p == cl);
626 ifd->active_[cl->pri_] = NULL;
627 }
628 }
629
630 /*
631 * Recompute the WRR weights.
632 */
633 if (ifd->wrr_) {
634 ifd->alloc_[cl->pri_] -= cl->allotment_;
635 ifd->num_[cl->pri_]--;
636 rmc_wrr_set_weights(ifd);
637 }
638
639 /*
640 * Re-compute the depth of the tree.
641 */
642 #if 1 /* ALTQ */
643 rmc_depth_recompute(cl->parent_);
644 #else
645 rmc_depth_recompute(ifd->root_);
646 #endif
647
648 splx(s);
649
650 /*
651 * Free the class structure.
652 */
653 if (cl->red_ != NULL) {
654 #ifdef ALTQ_RIO
655 if (q_is_rio(cl->q_))
656 rio_destroy((rio_t *)cl->red_);
657 #endif
658 #ifdef ALTQ_RED
659 if (q_is_red(cl->q_))
660 red_destroy(cl->red_);
661 #endif
662 }
663 FREE(cl->q_, M_DEVBUF);
664 FREE(cl, M_DEVBUF);
665 }
666
667
668 /*
669 * void
670 * rmc_init(...) - Initialize the resource management data structures
671 * associated with the output portion of interface 'ifp'. 'ifd' is
672 * where the structures will be built (for backwards compatibility, the
673 * structures aren't kept in the ifnet struct). 'nsecPerByte'
674 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
675 * 'restart' is the driver-specific routine that the generic 'delay
676 * until under limit' action will call to restart output. `maxq'
677 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
678 * is the maximum number of packets that the resource management
679 * code will allow to be queued 'downstream' (this is typically 1).
680 *
681 * Returns: NONE
682 */
683
684 void
685 rmc_init(ifq, ifd, nsecPerByte, restart, maxq, maxqueued, maxidle,
686 minidle, offtime, flags)
687 struct ifaltq *ifq;
688 struct rm_ifdat *ifd;
689 u_int nsecPerByte;
690 void (*restart)(struct ifaltq *);
691 int maxq, maxqueued;
692 u_int maxidle;
693 int minidle;
694 u_int offtime;
695 int flags;
696 {
697 int i, mtu;
698
699 /*
700 * Initialize the CBQ tracing/debug facility.
701 */
702 CBQTRACEINIT();
703
704 (void)memset(ifd, 0, sizeof (*ifd));
705 mtu = ifq->altq_ifp->if_mtu;
706 ifd->ifq_ = ifq;
707 ifd->restart = restart;
708 ifd->maxqueued_ = maxqueued;
709 ifd->ns_per_byte_ = nsecPerByte;
710 ifd->maxpkt_ = mtu;
711 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
712 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
713 #if 1
714 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
715 if (mtu * nsecPerByte > 10 * 1000000)
716 ifd->maxiftime_ /= 4;
717 #endif
718
719 reset_cutoff(ifd);
720 CBQTRACE(rmc_init, "INIT", ifd->cutoff_);
721
722 /*
723 * Initialize the CBQ's WRR state.
724 */
725 for (i = 0; i < RM_MAXPRIO; i++) {
726 ifd->alloc_[i] = 0;
727 ifd->M_[i] = 0;
728 ifd->num_[i] = 0;
729 ifd->na_[i] = 0;
730 ifd->active_[i] = NULL;
731 }
732
733 /*
734 * Initialize current packet state.
735 */
736 ifd->qi_ = 0;
737 ifd->qo_ = 0;
738 for (i = 0; i < RM_MAXQUEUED; i++) {
739 ifd->class_[i] = NULL;
740 ifd->curlen_[i] = 0;
741 ifd->borrowed_[i] = NULL;
742 }
743
744 /*
745 * Create the root class of the link-sharing structure.
746 */
747 if ((ifd->root_ = rmc_newclass(0, ifd,
748 nsecPerByte,
749 rmc_root_overlimit, maxq, 0, 0,
750 maxidle, minidle, offtime,
751 0, 0)) == NULL) {
752 printf("rmc_init: root class not allocated\n");
753 return;
754 }
755 ifd->root_->depth_ = 0;
756 }
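/*
 * Illustrative sketch only: how attach-time code might call rmc_init()
 * for a 10Mb/s interface (nsecPerByte = 800).  The names my_ifq, my_ifd
 * and cbq_restart_fn are hypothetical placeholders and the numeric
 * arguments are merely plausible defaults, not values mandated by this
 * file.
 */
#ifdef notdef
	rmc_init(my_ifq, my_ifd, 800 /* ns/byte */, cbq_restart_fn,
	    50 /* maxq */, 1 /* maxqueued */, 0 /* maxidle */,
	    0 /* minidle */, 0 /* offtime */, RMCF_WRR);
#endif /* notdef */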
757
758 /*
759 * void
760 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
761 * mbuf 'm' to queue for resource class 'cl'. This routine is called
762 * by a driver's if_output routine. This routine must be called with
763 * output packet completion interrupts locked out (to avoid racing with
764 * rmc_dequeue_next).
765 *
766 * Returns: 0 on successful queueing
767 * -1 when packet drop occurs
768 */
769 int
770 rmc_queue_packet(cl, m)
771 struct rm_class *cl;
772 mbuf_t *m;
773 {
774 struct timeval now;
775 struct rm_ifdat *ifd = cl->ifdat_;
776 int cpri = cl->pri_;
777 int is_empty = qempty(cl->q_);
778
779 RM_GETTIME(now);
780 if (ifd->cutoff_ > 0) {
781 if (TV_LT(&cl->undertime_, &now)) {
782 if (ifd->cutoff_ > cl->depth_)
783 ifd->cutoff_ = cl->depth_;
784 CBQTRACE(rmc_queue_packet, "ffoc", cl->depth_);
785 }
786 #if 1 /* ALTQ */
787 else {
788 /*
789 * the class is overlimit. if the class has
790 * underlimit ancestors, set cutoff to the lowest
791 * depth among them.
792 */
793 struct rm_class *borrow = cl->borrow_;
794
795 while (borrow != NULL &&
796 borrow->depth_ < ifd->cutoff_) {
797 if (TV_LT(&borrow->undertime_, &now)) {
798 ifd->cutoff_ = borrow->depth_;
799 CBQTRACE(rmc_queue_packet, "ffob", ifd->cutoff_);
800 break;
801 }
802 borrow = borrow->borrow_;
803 }
804 }
805 #else /* !ALTQ */
806 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
807 if (TV_LT(&cl->borrow_->undertime_, &now)) {
808 ifd->cutoff_ = cl->borrow_->depth_;
809 CBQTRACE(rmc_queue_packet, "ffob",
810 cl->borrow_->depth_);
811 }
812 }
813 #endif /* !ALTQ */
814 }
815
816 if (_rmc_addq(cl, m) < 0)
817 /* failed */
818 return (-1);
819
820 if (is_empty) {
821 CBQTRACE(rmc_queue_packet, "ytpe", cl->stats_.handle);
822 ifd->na_[cpri]++;
823 }
824
825 if (qlen(cl->q_) > qlimit(cl->q_)) {
826 /* note: qlimit can be set to 0 or 1 */
827 rmc_drop_action(cl);
828 return (-1);
829 }
830 return (0);
831 }
832
833 /*
834 * void
835 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
836 * classes to see if they are satisfied.
837 */
838
839 static void
840 rmc_tl_satisfied(ifd, now)
841 struct rm_ifdat *ifd;
842 struct timeval *now;
843 {
844 int i;
845 rm_class_t *p, *bp;
846
847 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
848 if ((bp = ifd->active_[i]) != NULL) {
849 p = bp;
850 do {
851 if (!rmc_satisfied(p, now)) {
852 ifd->cutoff_ = p->depth_;
853 return;
854 }
855 p = p->peer_;
856 } while (p != bp);
857 }
858 }
859
860 reset_cutoff(ifd);
861 }
862
863 /*
864 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
865 */
866
867 static int
868 rmc_satisfied(cl, now)
869 struct rm_class *cl;
870 struct timeval *now;
871 {
872 rm_class_t *p;
873
874 if (cl == NULL)
875 return (1);
876 if (TV_LT(now, &cl->undertime_))
877 return (1);
878 if (cl->depth_ == 0) {
879 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
880 return (0);
881 else
882 return (1);
883 }
884 if (cl->children_ != NULL) {
885 p = cl->children_;
886 while (p != NULL) {
887 if (!rmc_satisfied(p, now))
888 return (0);
889 p = p->next_;
890 }
891 }
892
893 return (1);
894 }
895
896 /*
897 * Return 1 if class 'cl' is under limit or can borrow from a parent,
898 * 0 if overlimit. As a side-effect, this routine will invoke the
899 * class overlimit action if the class is overlimit.
900 */
901
902 static int
903 rmc_under_limit(cl, now)
904 struct rm_class *cl;
905 struct timeval *now;
906 {
907 rm_class_t *p = cl;
908 rm_class_t *top;
909 struct rm_ifdat *ifd = cl->ifdat_;
910
911 ifd->borrowed_[ifd->qi_] = NULL;
912 /*
913 * If cl is the root class, then always return that it is
914 * underlimit. Otherwise, check to see if the class is underlimit.
915 */
916 if (cl->parent_ == NULL)
917 return (1);
918
919 if (cl->sleeping_) {
920 if (TV_LT(now, &cl->undertime_))
921 return (0);
922
923 CALLOUT_STOP(&cl->callout_);
924 cl->sleeping_ = 0;
925 cl->undertime_.tv_sec = 0;
926 return (1);
927 }
928
929 top = NULL;
930 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
931 if (((cl = cl->borrow_) == NULL) ||
932 (cl->depth_ > ifd->cutoff_)) {
933 #ifdef ADJUST_CUTOFF
934 if (cl != NULL)
935 /* cutoff is taking effect, just
936 return false without calling
937 the delay action. */
938 return (0);
939 #endif
940 #ifdef BORROW_OFFTIME
941 /*
942 * check if the class can borrow offtime too.
943 * borrow offtime from the top of the borrow
944 * chain if the top class is not overloaded.
945 */
946 if (cl != NULL) {
947 /* cutoff is taking effect, use this class as top. */
948 top = cl;
949 CBQTRACE(rmc_under_limit, "ffou", ifd->cutoff_);
950 }
951 if (top != NULL && top->avgidle_ == top->minidle_)
952 top = NULL;
953 p->overtime_ = *now;
954 (p->overlimit)(p, top);
955 #else
956 p->overtime_ = *now;
957 (p->overlimit)(p, NULL);
958 #endif
959 return (0);
960 }
961 top = cl;
962 }
963
964 if (cl != p)
965 ifd->borrowed_[ifd->qi_] = cl;
966 return (1);
967 }
968
969 /*
970 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed
971 * to packet-by-packet round robin.
972 *
973 * The heart of the weighted round-robin scheduler, which decides which
974 * class next gets to send a packet. Highest priority first, then
975 * weighted round-robin within priorities.
976 *
977 * Each able-to-send class gets to send until its byte allocation is
978 * exhausted. Thus, the active pointer is only changed after a class has
979 * exhausted its allocation.
980 *
981 * If the scheduler finds no class that is underlimit or able to borrow,
982 * then the first class found that had a nonzero queue and is allowed to
983 * borrow gets to send.
984 */
985
986 static mbuf_t *
987 _rmc_wrr_dequeue_next(ifd, op)
988 struct rm_ifdat *ifd;
989 int op;
990 {
991 struct rm_class *cl = NULL, *first = NULL;
992 u_int deficit;
993 int cpri;
994 mbuf_t *m;
995 struct timeval now;
996
997 RM_GETTIME(now);
998
999 /*
1000 * if the driver polls the top of the queue and then removes
1001 * the polled packet, we must return the same packet.
1002 */
1003 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1004 cl = ifd->pollcache_;
1005 cpri = cl->pri_;
1006 if (ifd->efficient_) {
1007 /* check if this class is overlimit */
1008 if (cl->undertime_.tv_sec != 0 &&
1009 rmc_under_limit(cl, &now) == 0)
1010 first = cl;
1011 }
1012 ifd->pollcache_ = NULL;
1013 goto _wrr_out;
1014 }
1015 else {
1016 /* mode == ALTDQ_POLL || pollcache == NULL */
1017 ifd->pollcache_ = NULL;
1018 ifd->borrowed_[ifd->qi_] = NULL;
1019 }
1020 #ifdef ADJUST_CUTOFF
1021 _again:
1022 #endif
1023 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1024 if (ifd->na_[cpri] == 0)
1025 continue;
1026 deficit = 0;
1027 /*
1028 * Loop through twice for a priority level, if some class
1029 * was unable to send a packet the first round because
1030 * of the weighted round-robin mechanism.
1031 * During the second loop at this level, deficit==2.
1032 * (This second loop is not needed if for every class,
1033 * "M[cl->pri_])" times "cl->allotment" is greater than
1034 * the byte size for the largest packet in the class.)
1035 */
1036 _wrr_loop:
1037 cl = ifd->active_[cpri];
1038 ASSERT(cl != NULL);
1039 do {
1040 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1041 cl->bytes_alloc_ += cl->w_allotment_;
1042 if (!qempty(cl->q_)) {
1043 if ((cl->undertime_.tv_sec == 0) ||
1044 rmc_under_limit(cl, &now)) {
1045 if (cl->bytes_alloc_ > 0 || deficit > 1)
1046 goto _wrr_out;
1047
1048 /* underlimit but no alloc */
1049 deficit = 1;
1050 #if 1
1051 ifd->borrowed_[ifd->qi_] = NULL;
1052 #endif
1053 }
1054 else if (first == NULL && cl->borrow_ != NULL)
1055 first = cl; /* borrowing candidate */
1056 }
1057
1058 cl->bytes_alloc_ = 0;
1059 cl = cl->peer_;
1060 } while (cl != ifd->active_[cpri]);
1061
1062 if (deficit == 1) {
1063 /* first loop found an underlimit class with deficit */
1064 /* Loop on same priority level, with new deficit. */
1065 deficit = 2;
1066 goto _wrr_loop;
1067 }
1068 }
1069
1070 #ifdef ADJUST_CUTOFF
1071 /*
1072 * no underlimit class found. if cutoff is taking effect,
1073 * increase cutoff and try again.
1074 */
1075 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1076 ifd->cutoff_++;
1077 CBQTRACE(_rmc_wrr_dequeue_next, "ojda", ifd->cutoff_);
1078 goto _again;
1079 }
1080 #endif /* ADJUST_CUTOFF */
1081 /*
1082 * If LINK_EFFICIENCY is turned on, then the first overlimit
1083 * class we encounter will send a packet if all the classes
1084 * of the link-sharing structure are overlimit.
1085 */
1086 reset_cutoff(ifd);
1087 CBQTRACE(_rmc_wrr_dequeue_next, "otsr", ifd->cutoff_);
1088
1089 if (!ifd->efficient_ || first == NULL)
1090 return (NULL);
1091
1092 cl = first;
1093 cpri = cl->pri_;
1094 #if 0 /* too time-consuming for nothing */
1095 if (cl->sleeping_)
1096 CALLOUT_STOP(&cl->callout_);
1097 cl->sleeping_ = 0;
1098 cl->undertime_.tv_sec = 0;
1099 #endif
1100 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1101 ifd->cutoff_ = cl->borrow_->depth_;
1102
1103 /*
1104 * Dequeue the packet and do the bookkeeping...
1105 */
1106 _wrr_out:
1107 if (op == ALTDQ_REMOVE) {
1108 m = _rmc_getq(cl);
1109 if (m == NULL)
1110 panic("_rmc_wrr_dequeue_next");
1111 if (qempty(cl->q_))
1112 ifd->na_[cpri]--;
1113
1114 /*
1115 * Update class statistics and link data.
1116 */
1117 if (cl->bytes_alloc_ > 0)
1118 cl->bytes_alloc_ -= m_pktlen(m);
1119
1120 if ((cl->bytes_alloc_ <= 0) || first == cl)
1121 ifd->active_[cl->pri_] = cl->peer_;
1122 else
1123 ifd->active_[cl->pri_] = cl;
1124
1125 ifd->class_[ifd->qi_] = cl;
1126 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1127 ifd->now_[ifd->qi_] = now;
1128 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1129 ifd->queued_++;
1130 } else {
1131 /* mode == ALTDQ_POLL */
1132 m = _rmc_pollq(cl);
1133 ifd->pollcache_ = cl;
1134 }
1135 return (m);
1136 }
1137
1138 /*
1139 * Dequeue & return next packet from the highest priority class that
1140 * has a packet to send & has enough allocation to send it. This
1141 * routine is called by a driver whenever it needs a new packet to
1142 * output.
1143 */
1144 static mbuf_t *
1145 _rmc_prr_dequeue_next(ifd, op)
1146 struct rm_ifdat *ifd;
1147 int op;
1148 {
1149 mbuf_t *m;
1150 int cpri;
1151 struct rm_class *cl, *first = NULL;
1152 struct timeval now;
1153
1154 RM_GETTIME(now);
1155
1156 /*
1157 * if the driver polls the top of the queue and then removes
1158 * the polled packet, we must return the same packet.
1159 */
1160 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1161 cl = ifd->pollcache_;
1162 cpri = cl->pri_;
1163 ifd->pollcache_ = NULL;
1164 goto _prr_out;
1165 } else {
1166 /* mode == ALTDQ_POLL || pollcache == NULL */
1167 ifd->pollcache_ = NULL;
1168 ifd->borrowed_[ifd->qi_] = NULL;
1169 }
1170 #ifdef ADJUST_CUTOFF
1171 _again:
1172 #endif
1173 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1174 if (ifd->na_[cpri] == 0)
1175 continue;
1176 cl = ifd->active_[cpri];
1177 ASSERT(cl != NULL);
1178 do {
1179 if (!qempty(cl->q_)) {
1180 if ((cl->undertime_.tv_sec == 0) ||
1181 rmc_under_limit(cl, &now))
1182 goto _prr_out;
1183 if (first == NULL && cl->borrow_ != NULL)
1184 first = cl;
1185 }
1186 cl = cl->peer_;
1187 } while (cl != ifd->active_[cpri]);
1188 }
1189
1190 #ifdef ADJUST_CUTOFF
1191 /*
1192 * no underlimit class found. if cutoff is taking effect, increase
1193 * cutoff and try again.
1194 */
1195 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1196 ifd->cutoff_++;
1197 goto _again;
1198 }
1199 #endif /* ADJUST_CUTOFF */
1200 /*
1201 * If LINK_EFFICIENCY is turned on, then the first overlimit
1202 * class we encounter will send a packet if all the classes
1203 * of the link-sharing structure are overlimit.
1204 */
1205 reset_cutoff(ifd);
1206 if (!ifd->efficient_ || first == NULL)
1207 return (NULL);
1208
1209 cl = first;
1210 cpri = cl->pri_;
1211 #if 0 /* too time-consuming for nothing */
1212 if (cl->sleeping_)
1213 CALLOUT_STOP(&cl->callout_);
1214 cl->sleeping_ = 0;
1215 cl->undertime_.tv_sec = 0;
1216 #endif
1217 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1218 ifd->cutoff_ = cl->borrow_->depth_;
1219
1220 /*
1221 * Dequeue the packet and do the bookkeeping...
1222 */
1223 _prr_out:
1224 if (op == ALTDQ_REMOVE) {
1225 m = _rmc_getq(cl);
1226 if (m == NULL)
1227 panic("_rmc_prr_dequeue_next");
1228 if (qempty(cl->q_))
1229 ifd->na_[cpri]--;
1230
1231 ifd->active_[cpri] = cl->peer_;
1232
1233 ifd->class_[ifd->qi_] = cl;
1234 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1235 ifd->now_[ifd->qi_] = now;
1236 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1237 ifd->queued_++;
1238 } else {
1239 /* mode == ALTDQ_POLL */
1240 m = _rmc_pollq(cl);
1241 ifd->pollcache_ = cl;
1242 }
1243 return (m);
1244 }
1245
1246 /*
1247 * mbuf_t *
1248 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1249 * is invoked by the packet driver to get the next packet to be
1250 * dequeued and output on the link. If WRR is enabled, then the
1251 * WRR dequeue next routine will determine the next packet to be sent.
1252 * Otherwise, packet-by-packet round robin is invoked.
1253 *
1254 * Returns: NULL, if a packet is not available or if all
1255 * classes are overlimit.
1256 *
1257 * Otherwise, Pointer to the next packet.
1258 */
1259
1260 mbuf_t *
1261 rmc_dequeue_next(ifd, mode)
1262 struct rm_ifdat *ifd;
1263 int mode;
1264 {
1265 if (ifd->queued_ >= ifd->maxqueued_)
1266 return (NULL);
1267 else if (ifd->wrr_)
1268 return (_rmc_wrr_dequeue_next(ifd, mode));
1269 else
1270 return (_rmc_prr_dequeue_next(ifd, mode));
1271 }
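/*
 * Illustrative sketch only: the poll-then-remove pattern that the
 * pollcache_ logic in the dequeue routines above is built to support.
 * Driver-specific checks and error handling are omitted.
 */
#ifdef notdef
	mbuf_t *m;

	if ((m = rmc_dequeue_next(ifd, ALTDQ_POLL)) != NULL) {
		/* the driver verifies it can accept the packet, then... */
		m = rmc_dequeue_next(ifd, ALTDQ_REMOVE);
	}
#endif /* notdef */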
1272
1273 /*
1274 * Update the utilization estimate for the packet that just completed.
1275 * The packet's class & the parent(s) of that class all get their
1276 * estimators updated. This routine is called by the driver's output-
1277 * packet-completion interrupt service routine.
1278 */
1279
1280 /*
1281 * a macro to approximate "divide by 1000"; it multiplies by roughly
1282 * 0.000999, which is close enough if a value has enough effective digits.
1283 * (on pentium, mul takes 9 cycles but div takes 46!)
1284 */
1285 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
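/*
 * (1/1024 + 1/65536 + 1/131072 = 0.00099945..., so the three shifts
 * above come within 0.06% of a true divide by 1000.)
 */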
1286 void
1287 rmc_update_class_util(ifd)
1288 struct rm_ifdat *ifd;
1289 {
1290 int idle, avgidle, pktlen;
1291 int pkt_time, tidle;
1292 rm_class_t *cl, *borrowed;
1293 rm_class_t *borrows;
1294 struct timeval *nowp;
1295
1296 /*
1297 * Get the most recent completed class.
1298 */
1299 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1300 return;
1301
1302 pktlen = ifd->curlen_[ifd->qo_];
1303 borrowed = ifd->borrowed_[ifd->qo_];
1304 borrows = borrowed;
1305
1306 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1307
1308 /*
1309 * Run the estimator on the class and its ancestors.
1310 */
1311 /*
1312 * rmc_update_class_util is designed to be called when the
1313 * transfer is completed from a xmit complete interrupt,
1314 * but most drivers don't implement an upcall for that.
1315 * so, just use estimated completion time.
1316 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1317 */
1318 nowp = &ifd->now_[ifd->qo_];
1319 /* get pkt_time (for link) in usec */
1320 #if 1 /* use approximation */
1321 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1322 pkt_time = NSEC_TO_USEC(pkt_time);
1323 #else
1324 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1325 #endif
1326 #if 1 /* ALTQ4PPP */
1327 if (TV_LT(nowp, &ifd->ifnow_)) {
1328 int iftime;
1329
1330 /*
1331 * make sure the estimated completion time does not go
1332 * too far. it can happen when the link layer supports
1333 * data compression or the interface speed is set to
1334 * a much lower value.
1335 */
1336 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1337 if (iftime+pkt_time < ifd->maxiftime_) {
1338 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1339 } else {
1340 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1341 }
1342 } else {
1343 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1344 }
1345 #else
1346 if (TV_LT(nowp, &ifd->ifnow_)) {
1347 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1348 } else {
1349 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1350 }
1351 #endif
1352
1353 while (cl != NULL) {
1354 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1355 if (idle >= 2000000)
1356 /*
1357 * this class is idle enough, reset avgidle.
1358 * (TV_DELTA returns 2000000 us when delta is large.)
1359 */
1360 cl->avgidle_ = cl->maxidle_;
1361
1362 /* get pkt_time (for class) in usec */
1363 #if 1 /* use approximation */
1364 pkt_time = pktlen * cl->ns_per_byte_;
1365 pkt_time = NSEC_TO_USEC(pkt_time);
1366 #else
1367 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1368 #endif
1369 idle -= pkt_time;
1370
1371 avgidle = cl->avgidle_;
1372 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1373 cl->avgidle_ = avgidle;
1374
1375 /* Are we overlimit ? */
1376 if (avgidle <= 0) {
1377 CBQTRACE(rmc_update_class_util, "milo", cl->stats_.handle);
1378 #if 1 /* ALTQ */
1379 /*
1380 * need some lower bound for avgidle, otherwise
1381 * a borrowing class gets unbounded penalty.
1382 */
1383 if (avgidle < cl->minidle_)
1384 avgidle = cl->avgidle_ = cl->minidle_;
1385 #endif
1386 /* set next idle to make avgidle 0 */
1387 tidle = pkt_time +
1388 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1389 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1390 ++cl->stats_.over;
1391 } else {
1392 cl->avgidle_ =
1393 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1394 cl->undertime_.tv_sec = 0;
1395 if (cl->sleeping_) {
1396 CALLOUT_STOP(&cl->callout_);
1397 cl->sleeping_ = 0;
1398 }
1399 }
1400
1401 if (borrows != NULL) {
1402 if (borrows != cl)
1403 ++cl->stats_.borrows;
1404 else
1405 borrows = NULL;
1406 }
1407 cl->last_ = ifd->ifnow_;
1408 cl->last_pkttime_ = pkt_time;
1409
1410 #if 1
1411 if (cl->parent_ == NULL) {
1412 /* take stats of root class */
1413 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1414 }
1415 #endif
1416
1417 cl = cl->parent_;
1418 }
1419
1420 /*
1421 * Check to see if cutoff needs to be set to a new level.
1422 */
1423 cl = ifd->class_[ifd->qo_];
1424 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1425 #if 1 /* ALTQ */
1426 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1427 rmc_tl_satisfied(ifd, nowp);
1428 CBQTRACE(rmc_update_class_util, "broe", ifd->cutoff_);
1429 } else {
1430 ifd->cutoff_ = borrowed->depth_;
1431 CBQTRACE(rmc_update_class_util, "ffob", borrowed->depth_);
1432 }
1433 #else /* !ALTQ */
1434 if ((qlen(cl->q_) <= 1) || TV_LT(nowp, &borrowed->undertime_)) {
1435 reset_cutoff(ifd);
1436 #ifdef notdef
1437 rmc_tl_satisfied(ifd, nowp);
1438 #endif
1439 CBQTRACE(rmc_update_class_util, "broe", ifd->cutoff_);
1440 } else {
1441 ifd->cutoff_ = borrowed->depth_;
1442 CBQTRACE(rmc_update_class_util, "ffob", borrowed->depth_);
1443 }
1444 #endif /* !ALTQ */
1445 }
1446
1447 /*
1448 * Release class slot
1449 */
1450 ifd->borrowed_[ifd->qo_] = NULL;
1451 ifd->class_[ifd->qo_] = NULL;
1452 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1453 ifd->queued_--;
1454 }
1455
1456 /*
1457 * void
1458 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1459 * over-limit action routines. These get invoked by rmc_under_limit()
1460 * if a class with packets to send is over its bandwidth limit & can't
1461 * borrow from a parent class.
1462 *
1463 * Returns: NONE
1464 */
1465
1466 static void
1467 rmc_drop_action(cl)
1468 struct rm_class *cl;
1469 {
1470 struct rm_ifdat *ifd = cl->ifdat_;
1471
1472 ASSERT(qlen(cl->q_) > 0);
1473 _rmc_dropq(cl);
1474 if (qempty(cl->q_))
1475 ifd->na_[cl->pri_]--;
1476 }
1477
1478 void rmc_dropall(cl)
1479 struct rm_class *cl;
1480 {
1481 struct rm_ifdat *ifd = cl->ifdat_;
1482
1483 if (!qempty(cl->q_)) {
1484 _flushq(cl->q_);
1485
1486 ifd->na_[cl->pri_]--;
1487 }
1488 }
1489
1490 #if (__FreeBSD_version > 300000)
1491 /* hzto() is removed from FreeBSD-3.0 */
1492 static int hzto __P((struct timeval *));
1493
1494 static int
1495 hzto(tv)
1496 struct timeval *tv;
1497 {
1498 struct timeval t2;
1499
1500 getmicrotime(&t2);
1501 t2.tv_sec = tv->tv_sec - t2.tv_sec;
1502 t2.tv_usec = tv->tv_usec - t2.tv_usec;
1503 return (tvtohz(&t2));
1504 }
1505 #endif /* __FreeBSD_version > 300000 */
1506
1507 /*
1508 * void
1509 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1510 * delay action routine. It is invoked via rmc_under_limit when the
1511 * packet is discovered to be overlimit.
1512 *
1513 * If the delay action is the result of the class borrowed from being
1514 * overlimit, then delay for the offtime of that overlimit borrowed class.
1515 *
1516 * Returns: NONE
1517 */
1518
1519 void
1520 rmc_delay_action(cl, borrow)
1521 struct rm_class *cl, *borrow;
1522 {
1523 int delay, t, extradelay;
1524
1525 cl->stats_.overactions++;
1526 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1527 #ifndef BORROW_OFFTIME
1528 delay += cl->offtime_;
1529 #endif
1530
1531 if (!cl->sleeping_) {
1532 CBQTRACE(rmc_delay_action, "yled", cl->stats_.handle);
1533 #ifdef BORROW_OFFTIME
1534 if (borrow != NULL)
1535 extradelay = borrow->offtime_;
1536 else
1537 #endif
1538 extradelay = cl->offtime_;
1539
1540 #ifdef ALTQ
1541 /*
1542 * XXX recalculate suspend time:
1543 * current undertime is (tidle + pkt_time) calculated
1544 * from the last transmission.
1545 * tidle: time required to bring avgidle back to 0
1546 * pkt_time: target waiting time for this class
1547 * we need to replace pkt_time by offtime
1548 */
1549 extradelay -= cl->last_pkttime_;
1550 #endif
1551 if (extradelay > 0) {
1552 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1553 delay += extradelay;
1554 }
1555
1556 cl->sleeping_ = 1;
1557 cl->stats_.delays++;
1558
1559 /*
1560 * Since packets are phased randomly with respect to the
1561 * clock, 1 tick (the next clock tick) can be an arbitrarily
1562 * short time so we have to wait for at least two ticks.
1563 * NOTE: If there's no other traffic, we need the timer as
1564 * a 'backstop' to restart this class.
1565 */
1566 if (delay > tick * 2) {
1567 #ifdef __FreeBSD__
1568 /* FreeBSD rounds up the tick */
1569 t = hzto(&cl->undertime_);
1570 #else
1571 /* other BSDs round down the tick */
1572 t = hzto(&cl->undertime_) + 1;
1573 #endif
1574 } else
1575 t = 2;
1576 CALLOUT_RESET(&cl->callout_, t,
1577 (timeout_t *)rmc_restart, (caddr_t)cl);
1578 }
1579 }
1580
1581 /*
1582 * void
1583 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1584 * called by the system timer code & is responsible for checking if the
1585 * class is still sleeping (it might have been restarted as a side
1586 * effect of the queue scan on a packet arrival) and, if so, restarting
1587 * output for the class. Inspecting the class state & restarting output
1588 * require locking the class structure. In general the driver is
1589 * responsible for locking but this is the only routine that is not
1590 * called directly or indirectly from the interface driver so it has
1591 * know about system locking conventions. Under bsd, locking is done
1592 * by raising IPL to splnet so that's what's implemented here. On a
1593 * different system this would probably need to be changed.
1594 *
1595 * Returns: NONE
1596 */
1597
1598 static void
1599 rmc_restart(cl)
1600 struct rm_class *cl;
1601 {
1602 struct rm_ifdat *ifd = cl->ifdat_;
1603 int s;
1604
1605 s = splnet();
1606 if (cl->sleeping_) {
1607 cl->sleeping_ = 0;
1608 cl->undertime_.tv_sec = 0;
1609
1610 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1611 CBQTRACE(rmc_restart, "trts", cl->stats_.handle);
1612 (ifd->restart)(ifd->ifq_);
1613 }
1614 }
1615 splx(s);
1616 }
1617
1618 /*
1619 * void
1620 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1621 * handling routine for the root class of the link sharing structure.
1622 *
1623 * Returns: NONE
1624 */
1625
1626 static void
1627 rmc_root_overlimit(cl, borrow)
1628 struct rm_class *cl, *borrow;
1629 {
1630 panic("rmc_root_overlimit");
1631 }
1632
1633 /*
1634 * Packet Queue handling routines. Their purpose is to localize, in
1635 * this one place, the code's dependence on whether the queues are RED
1636 * queues or droptail queues.
1637 */
1638
1639 static int
1640 _rmc_addq(cl, m)
1641 rm_class_t *cl;
1642 mbuf_t *m;
1643 {
1644 #ifdef ALTQ_RIO
1645 if (q_is_rio(cl->q_))
1646 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1647 #endif
1648 #ifdef ALTQ_RED
1649 if (q_is_red(cl->q_))
1650 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1651 #endif /* ALTQ_RED */
1652
1653 if (cl->flags_ & RMCF_CLEARDSCP)
1654 write_dsfield(m, cl->pktattr_, 0);
1655
1656 _addq(cl->q_, m);
1657 return (0);
1658 }
1659
1660 /* note: _rmc_dropq is not called for red */
1661 static void
1662 _rmc_dropq(cl)
1663 rm_class_t *cl;
1664 {
1665 mbuf_t *m;
1666
1667 if ((m = _getq(cl->q_)) != NULL)
1668 m_freem(m);
1669 }
1670
1671 static mbuf_t *
1672 _rmc_getq(cl)
1673 rm_class_t *cl;
1674 {
1675 #ifdef ALTQ_RIO
1676 if (q_is_rio(cl->q_))
1677 return rio_getq((rio_t *)cl->red_, cl->q_);
1678 #endif
1679 #ifdef ALTQ_RED
1680 if (q_is_red(cl->q_))
1681 return red_getq(cl->red_, cl->q_);
1682 #endif
1683 return _getq(cl->q_);
1684 }
1685
1686 static mbuf_t *
1687 _rmc_pollq(cl)
1688 rm_class_t *cl;
1689 {
1690 return qhead(cl->q_);
1691 }
1692
1693 #ifdef CBQ_TRACE
1694
1695 /*
1696 * DDB hook to trace cbq events:
1697 * the last 1024 events are held in a circular buffer.
1698 * use "call cbqtrace_dump(N)" to display 20 events from Nth event.
1699 */
1700 void cbqtrace_dump(int);
1701 static char *rmc_funcname(void *);
1702
1703 static struct rmc_funcs {
1704 void *func;
1705 char *name;
1706 } rmc_funcs[] = {
1707 { rmc_init, "rmc_init" },
1708 { rmc_queue_packet, "rmc_queue_packet" },
1709 { rmc_under_limit, "rmc_under_limit" },
1710 { rmc_update_class_util, "rmc_update_class_util" },
1711 { rmc_delay_action, "rmc_delay_action" },
1712 { rmc_restart, "rmc_restart" },
1713 { _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next" },
1714 { NULL, NULL }
1715 };
1716
1717 static char *rmc_funcname(func)
1718 void *func;
1719 {
1720 struct rmc_funcs *fp;
1721
1722 for (fp = rmc_funcs; fp->func != NULL; fp++)
1723 if (fp->func == func)
1724 return (fp->name);
1725 return ("unknown");
1726 }
1727
1728 void cbqtrace_dump(counter)
1729 int counter;
1730 {
1731 int i, *p;
1732 char *cp;
1733
1734 counter = counter % NCBQTRACE;
1735 p = (int *)&cbqtrace_buffer[counter];
1736
1737 for (i=0; i<20; i++) {
1738 printf("[0x%x] ", *p++);
1739 printf("%s: ", rmc_funcname((void *)*p++));
1740 cp = (char *)p++;
1741 printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1742 printf("%d\n",*p++);
1743
1744 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1745 p = (int *)cbqtrace_buffer;
1746 }
1747 }
1748 #endif /* CBQ_TRACE */
1749
1750 #endif /* ALTQ_CBQ */
1751
1752 #if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
1753 #if !defined(__GNUC__) || defined(ALTQ_DEBUG)
1754
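/*
 * The class queues below are circular, singly-linked lists of mbufs:
 * qtail(q) points at the most recently added packet, and that tail's
 * m_nextpkt points back at the head, so both ends of the queue are
 * reachable through the single tail pointer.
 */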
1755 void
1756 _addq(q, m)
1757 class_queue_t *q;
1758 mbuf_t *m;
1759 {
1760 mbuf_t *m0;
1761
1762 if ((m0 = qtail(q)) != NULL)
1763 m->m_nextpkt = m0->m_nextpkt;
1764 else
1765 m0 = m;
1766 m0->m_nextpkt = m;
1767 qtail(q) = m;
1768 qlen(q)++;
1769 }
1770
1771 mbuf_t *
1772 _getq(q)
1773 class_queue_t *q;
1774 {
1775 mbuf_t *m, *m0;
1776
1777 if ((m = qtail(q)) == NULL)
1778 return (NULL);
1779 if ((m0 = m->m_nextpkt) != m)
1780 m->m_nextpkt = m0->m_nextpkt;
1781 else {
1782 ASSERT(qlen(q) == 1);
1783 qtail(q) = NULL;
1784 }
1785 qlen(q)--;
1786 m0->m_nextpkt = NULL;
1787 return (m0);
1788 }
1789
1790 /* remove and return the packet at the tail of the queue */
1791 mbuf_t *
1792 _getq_tail(q)
1793 class_queue_t *q;
1794 {
1795 mbuf_t *m, *m0, *prev;
1796
1797 if ((m = m0 = qtail(q)) == NULL)
1798 return NULL;
1799 do {
1800 prev = m0;
1801 m0 = m0->m_nextpkt;
1802 } while (m0 != m);
1803 prev->m_nextpkt = m->m_nextpkt;
1804 if (prev == m) {
1805 ASSERT(qlen(q) == 1);
1806 qtail(q) = NULL;
1807 } else
1808 qtail(q) = prev;
1809 qlen(q)--;
1810 m->m_nextpkt = NULL;
1811 return (m);
1812 }
1813
1814 /* remove and return a randomly selected packet from the queue */
1815 mbuf_t *
1816 _getq_random(q)
1817 class_queue_t *q;
1818 {
1819 struct mbuf *m;
1820 int i, n;
1821
1822 if ((m = qtail(q)) == NULL)
1823 return NULL;
1824 if (m->m_nextpkt == m) {
1825 ASSERT(qlen(q) == 1);
1826 qtail(q) = NULL;
1827 } else {
1828 struct mbuf *prev = NULL;
1829
1830 n = random() % qlen(q) + 1;
1831 for (i = 0; i < n; i++) {
1832 prev = m;
1833 m = m->m_nextpkt;
1834 }
1835 prev->m_nextpkt = m->m_nextpkt;
1836 if (m == qtail(q))
1837 qtail(q) = prev;
1838 }
1839 qlen(q)--;
1840 m->m_nextpkt = NULL;
1841 return (m);
1842 }
1843
1844 void
1845 _removeq(q, m)
1846 class_queue_t *q;
1847 mbuf_t *m;
1848 {
1849 mbuf_t *m0, *prev;
1850
1851 m0 = qtail(q);
1852 do {
1853 prev = m0;
1854 m0 = m0->m_nextpkt;
1855 } while (m0 != m);
1856 prev->m_nextpkt = m->m_nextpkt;
1857 if (prev == m)
1858 qtail(q) = NULL;
1859 else if (qtail(q) == m)
1860 qtail(q) = prev;
1861 qlen(q)--;
1862 }
1863
1864 void
1865 _flushq(q)
1866 class_queue_t *q;
1867 {
1868 mbuf_t *m;
1869
1870 while ((m = _getq(q)) != NULL)
1871 m_freem(m);
1872 ASSERT(qlen(q) == 0);
1873 }
1874
1875 #endif /* !__GNUC__ || ALTQ_DEBUG */
1876 #endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */