FreeBSD/Linux Kernel Cross Reference
sys/altq/altq_rio.c
1 /* $NetBSD: altq_rio.c,v 1.25 2021/09/21 14:30:15 christos Exp $ */
2 /* $KAME: altq_rio.c,v 1.19 2005/04/13 03:44:25 suz Exp $ */
3
4 /*
5 * Copyright (C) 1998-2003
6 * Sony Computer Science Laboratories Inc. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 /*
30 * Copyright (c) 1990-1994 Regents of the University of California.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the Computer Systems
44 * Engineering Group at Lawrence Berkeley Laboratory.
45 * 4. Neither the name of the University nor of the Laboratory may be used
46 * to endorse or promote products derived from this software without
47 * specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62 #include <sys/cdefs.h>
63 __KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.25 2021/09/21 14:30:15 christos Exp $");
64
65 #ifdef _KERNEL_OPT
66 #include "opt_altq.h"
67 #include "opt_inet.h"
68 #include "pf.h"
69 #endif
70
71 #ifdef ALTQ_RIO /* rio is enabled by ALTQ_RIO option in opt_altq.h */
72
73 #include <sys/param.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/systm.h>
78 #include <sys/errno.h>
79 #include <sys/kauth.h>
80 #if 1 /* ALTQ3_COMPAT */
81 #include <sys/proc.h>
82 #include <sys/sockio.h>
83 #include <sys/kernel.h>
84 #endif
85
86 #include <net/if.h>
87
88 #include <netinet/in.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/ip.h>
91 #ifdef INET6
92 #include <netinet/ip6.h>
93 #endif
94
95 #if NPF > 0
96 #include <net/pfvar.h>
97 #endif
98 #include <altq/altq.h>
99 #include <altq/altq_cdnr.h>
100 #include <altq/altq_red.h>
101 #include <altq/altq_rio.h>
102 #ifdef ALTQ3_COMPAT
103 #include <altq/altq_conf.h>
104 #endif
105
106 /*
107 * RIO: RED with IN/OUT bit
108 * described in
109 * "Explicit Allocation of Best Effort Packet Delivery Service"
110 * David D. Clark and Wenjia Fang, MIT Lab for Computer Science
111 * http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
112 *
113 * this implementation is extended to support more than 2 drop precedence
114 * values as described in RFC2597 (Assured Forwarding PHB Group).
115 *
116 */
117 /*
118 * AF DS (differentiated service) codepoints.
119 * (classes can be mapped to CBQ or H-FSC classes.)
120 *
121 * 0 1 2 3 4 5 6 7
122 * +---+---+---+---+---+---+---+---+
123 * | CLASS |DropPre| 0 | CU |
124 * +---+---+---+---+---+---+---+---+
125 *
126 * class 1: 001
127 * class 2: 010
128 * class 3: 011
129 * class 4: 100
130 *
131 * low drop prec: 01
132 * medium drop prec: 10
133 * high drop prec: 11
134 */
135
136 /* normal red parameters */
137 #define W_WEIGHT 512 /* inverse of weight of EWMA (511/512) */
138 /* q_weight = 0.00195 */
139
140 /* red parameters for a slow link */
141 #define W_WEIGHT_1 128 /* inverse of weight of EWMA (127/128) */
142 /* q_weight = 0.0078125 */
143
144 /* red parameters for a very slow link (e.g., dialup) */
145 #define W_WEIGHT_2 64 /* inverse of weight of EWMA (63/64) */
146 /* q_weight = 0.015625 */
147
148 /* fixed-point uses 12-bit decimal places */
149 #define FP_SHIFT 12 /* fixed-point shift */
150
151 /* red parameters for drop probability */
152 #define INV_P_MAX 10 /* inverse of max drop probability */
153 #define TH_MIN 5 /* min threshold */
154 #define TH_MAX 15 /* max threshold */
155
156 #define RIO_LIMIT 60 /* default max queue length */
157 #define RIO_STATS /* collect statistics */
158
159 #define TV_DELTA(a, b, delta) { \
160 register int xxs; \
161 \
162 delta = (a)->tv_usec - (b)->tv_usec; \
163 if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { \
164 if (xxs < 0) { \
165 delta = 60000000; \
166 } else if (xxs > 4) { \
167 if (xxs > 60) \
168 delta = 60000000; \
169 else \
170 delta += xxs * 1000000; \
171 } else while (xxs > 0) { \
172 delta += 1000000; \
173 xxs--; \
174 } \
175 } \
176 }
177
178 #ifdef ALTQ3_COMPAT
179 /* rio_list keeps all rio_queue_t's allocated. */
180 static rio_queue_t *rio_list = NULL;
181 #endif
182 /* default rio parameter values */
183 static struct redparams default_rio_params[RIO_NDROPPREC] = {
184 /* th_min, th_max, inv_pmax */
185 { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
186 { TH_MAX + TH_MIN, TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
187 { TH_MIN, TH_MAX, INV_P_MAX } /* high drop precedence */
188 };
189
190 /* internal function prototypes */
191 static int dscp2index(u_int8_t);
192 #ifdef ALTQ3_COMPAT
193 static int rio_enqueue(struct ifaltq *, struct mbuf *);
194 static struct mbuf *rio_dequeue(struct ifaltq *, int);
195 static int rio_request(struct ifaltq *, int, void *);
196 static int rio_detach(rio_queue_t *);
197
198 /*
199 * rio device interface
200 */
201 altqdev_decl(rio);
202
203 #endif /* ALTQ3_COMPAT */
204
205 rio_t *
206 rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
207 {
208 rio_t *rp;
209 int w, i;
210 int npkts_per_sec;
211
212 rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK|M_ZERO);
213 if (rp == NULL)
214 return (NULL);
215
216 rp->rio_flags = flags;
217 if (pkttime == 0)
218 /* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
219 rp->rio_pkttime = 800;
220 else
221 rp->rio_pkttime = pkttime;
222
223 if (weight != 0)
224 rp->rio_weight = weight;
225 else {
226 /* use default */
227 rp->rio_weight = W_WEIGHT;
228
229 /* when the link is very slow, adjust red parameters */
230 npkts_per_sec = 1000000 / rp->rio_pkttime;
231 if (npkts_per_sec < 50) {
232 /* up to about 400Kbps */
233 rp->rio_weight = W_WEIGHT_2;
234 } else if (npkts_per_sec < 300) {
235 /* up to about 2.4Mbps */
236 rp->rio_weight = W_WEIGHT_1;
237 }
238 }
239
240 /* calculate wshift. weight must be power of 2 */
241 w = rp->rio_weight;
242 for (i = 0; w > 1; i++)
243 w = w >> 1;
244 rp->rio_wshift = i;
245 w = 1 << rp->rio_wshift;
246 if (w != rp->rio_weight) {
247 printf("invalid weight value %d for red! use %d\n",
248 rp->rio_weight, w);
249 rp->rio_weight = w;
250 }
251
252 /* allocate weight table */
253 rp->rio_wtab = wtab_alloc(rp->rio_weight);
254
255 for (i = 0; i < RIO_NDROPPREC; i++) {
256 struct dropprec_state *prec = &rp->rio_precstate[i];
257
258 prec->avg = 0;
259 prec->idle = 1;
260
261 if (params == NULL || params[i].inv_pmax == 0)
262 prec->inv_pmax = default_rio_params[i].inv_pmax;
263 else
264 prec->inv_pmax = params[i].inv_pmax;
265 if (params == NULL || params[i].th_min == 0)
266 prec->th_min = default_rio_params[i].th_min;
267 else
268 prec->th_min = params[i].th_min;
269 if (params == NULL || params[i].th_max == 0)
270 prec->th_max = default_rio_params[i].th_max;
271 else
272 prec->th_max = params[i].th_max;
273
274 /*
275 * th_min_s and th_max_s are scaled versions of th_min
276 * and th_max to be compared with avg.
277 */
278 prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
279 prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);
280
281 /*
282 * precompute probability denominator
283 * probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
284 */
285 prec->probd = (2 * (prec->th_max - prec->th_min)
286 * prec->inv_pmax) << FP_SHIFT;
287
288 microtime(&prec->last);
289 }
290
291 return (rp);
292 }
293
/*
 * Free a rio state block previously obtained from rio_alloc().
 * The weight table is released first, then the state itself.
 */
void
rio_destroy(rio_t *rp)
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}
300
301 void
302 rio_getstats(rio_t *rp, struct redstats *sp)
303 {
304 int i;
305
306 for (i = 0; i < RIO_NDROPPREC; i++) {
307 memcpy(sp, &rp->q_stats[i], sizeof(struct redstats));
308 sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
309 sp++;
310 }
311 }
312
313 #if (RIO_NDROPPREC == 3)
314 /*
315 * internally, a drop precedence value is converted to an index
316 * starting from 0.
317 */
318 static int
319 dscp2index(u_int8_t dscp)
320 {
321 int dpindex = dscp & AF_DROPPRECMASK;
322
323 if (dpindex == 0)
324 return (0);
325 return ((dpindex >> 3) - 1);
326 }
327 #endif
328
/*
 * Enqueue a packet subject to RIO (multi-precedence RED) dropping.
 *
 * The packet's drop precedence is taken from its DS field.  The
 * queue-average estimators of its own and all higher (worse) drop
 * precedences are updated, then the packet is either dropped (early,
 * or forced when avg/qlen exceed the limits) or appended to q.
 *
 * Returns 0 when the packet was queued, -1 when it was dropped; the
 * mbuf is freed here on drop.
 */
int
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int avg, droptype;
	u_int8_t dsfield, odsfield;
	int dpindex, i, n, t;
	struct timeval now;
	struct dropprec_state *prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;	/* sentinel: microtime() is taken lazily below */
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			/* queue was empty: decay avg over the idle period */
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
				    (now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
					    pow_w(rp->rio_wtab, n);
			}
		}

		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	/* drop decision uses the packet's own precedence state */
	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
		    prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	/* accepted: charge the packet to its own and all higher precedences */
	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	M_SETCTX(m, (intptr_t)dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}
441
442 struct mbuf *
443 rio_getq(rio_t *rp, class_queue_t *q)
444 {
445 struct mbuf *m;
446 int dpindex, i;
447
448 if ((m = _getq(q)) == NULL)
449 return NULL;
450
451 dpindex = M_GETCTX(m, intptr_t);
452 for (i = dpindex; i < RIO_NDROPPREC; i++) {
453 if (--rp->rio_precstate[i].qlen == 0) {
454 if (rp->rio_precstate[i].idle == 0) {
455 rp->rio_precstate[i].idle = 1;
456 microtime(&rp->rio_precstate[i].last);
457 }
458 }
459 }
460 return (m);
461 }
462
463 #ifdef ALTQ3_COMPAT
/*
 * rio device open: nothing to set up per open.
 */
int
rioopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}
471
472 int
473 rioclose(dev_t dev, int flag, int fmt,
474 struct lwp *l)
475 {
476 rio_queue_t *rqp;
477 int err, error = 0;
478
479 while ((rqp = rio_list) != NULL) {
480 /* destroy all */
481 err = rio_detach(rqp);
482 if (err != 0 && error == 0)
483 error = err;
484 }
485
486 return error;
487 }
488
/*
 * rio device ioctl handler.
 *
 * Supported commands: enable/disable the discipline on an interface,
 * attach/detach rio state to an interface, read statistics, change
 * the configuration, and set the global default RED parameters.
 * All commands except RIO_GETSTATS require ALTQ network privilege.
 * Returns 0 on success or an errno value.
 */
int
rioioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		/* read-only command: no privilege needed */
		break;
	default:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_RIO, NULL,
		    NULL, NULL)) != 0)
			return (error);
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		/* start running rio on the named interface */
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		/* stop running rio on the named interface */
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		/* create rio state and bind it to the interface's send queue */
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}

		rqp->rq_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		/* default rio parameters (weight/params/flags/pkttime) */
		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
		    rio_enqueue, rio_dequeue, rio_request,
		    NULL, NULL);
		if (error) {
			/* unwind all three allocations on failure */
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		/* copy current limits, parameters and counters to user */
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight = rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				memcpy(&q_stats->q_stats[i], &rp->q_stats[i],
				    sizeof(struct redstats));
				/* descale avg (kept scaled internally) */
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
				    = rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
				    = rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
				    = rp->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_CONFIG:
		/* replace the rio state with a newly configured one */
		do {
			struct rio_conf *fc;
			rio_t *new;
			int s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
			    ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			/* build the new state before touching the old one */
			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			/* swap states at splnet so enqueue/dequeue can't race */
			s = splnet();
			_flushq(rqp->rq_q);
			limit = fc->rio_limit;
			/* the hard limit must cover the largest th_max */
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
				    rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
				    rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
				    rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_SETDEFAULTS:
		/* overwrite the global defaults used by rio_alloc() */
		do {
			struct redparams *rp;
			int i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
692
693 static int
694 rio_detach(rio_queue_t *rqp)
695 {
696 rio_queue_t *tmp;
697 int error = 0;
698
699 if (ALTQ_IS_ENABLED(rqp->rq_ifq))
700 altq_disable(rqp->rq_ifq);
701
702 if ((error = altq_detach(rqp->rq_ifq)))
703 return (error);
704
705 if (rio_list == rqp)
706 rio_list = rqp->rq_next;
707 else {
708 for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
709 if (tmp->rq_next == rqp) {
710 tmp->rq_next = rqp->rq_next;
711 break;
712 }
713 if (tmp == NULL)
714 printf("rio_detach: no state found in rio_list!\n");
715 }
716
717 rio_destroy(rqp->rq_rio);
718 free(rqp->rq_q, M_DEVBUF);
719 free(rqp, M_DEVBUF);
720 return (error);
721 }
722
723 /*
724 * rio support routines
725 */
726 static int
727 rio_request(struct ifaltq *ifq, int req, void *arg)
728 {
729 rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
730
731 switch (req) {
732 case ALTRQ_PURGE:
733 _flushq(rqp->rq_q);
734 if (ALTQ_IS_ENABLED(ifq))
735 ifq->ifq_len = 0;
736 break;
737 }
738 return (0);
739 }
740
741 /*
742 * enqueue routine:
743 *
744 * returns: 0 when successfully queued.
745 * ENOBUFS when drop occurs.
746 */
747 static int
748 rio_enqueue(struct ifaltq *ifq, struct mbuf *m)
749 {
750 struct altq_pktattr pktattr;
751 rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
752 int error = 0;
753
754 pktattr.pattr_class = m->m_pkthdr.pattr_class;
755 pktattr.pattr_af = m->m_pkthdr.pattr_af;
756 pktattr.pattr_hdr = m->m_pkthdr.pattr_hdr;
757
758 if (rio_addq(rqp->rq_rio, rqp->rq_q, m, &pktattr) == 0)
759 ifq->ifq_len++;
760 else
761 error = ENOBUFS;
762 return error;
763 }
764
765 /*
766 * dequeue routine:
767 * must be called in splnet.
768 *
769 * returns: mbuf dequeued.
770 * NULL when no packet is available in the queue.
771 */
772
773 static struct mbuf *
774 rio_dequeue(struct ifaltq *ifq, int op)
775 {
776 rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
777 struct mbuf *m = NULL;
778
779 if (op == ALTDQ_POLL)
780 return qhead(rqp->rq_q);
781
782 m = rio_getq(rqp->rq_rio, rqp->rq_q);
783 if (m != NULL)
784 ifq->ifq_len--;
785 return m;
786 }
787
788 #ifdef KLD_MODULE
789
790 static struct altqsw rio_sw =
791 {"rio", rioopen, rioclose, rioioctl};
792
793 ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
794 MODULE_VERSION(altq_rio, 1);
795 MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);
796
797 #endif /* KLD_MODULE */
798 #endif /* ALTQ3_COMPAT */
799
800 #endif /* ALTQ_RIO */
Cache object: c3e24e64171ca430e01e8d3287b27ffb
|