/*
 * FQ_Codel - The FlowQueue-Codel scheduler/AQM
 *
 * $FreeBSD$
 *
 * Copyright (C) 2016 Centre for Advanced Internet Architectures,
 *  Swinburne University of Technology, Melbourne, Australia.
 * Portions of this code were made possible in part by a gift from
 *  The Comcast Innovation Fund.
 * Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/socket.h>
//#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <net/if.h>	/* IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h>	/* ipfw_rule_ref */
#include <netinet/ip_fw.h>	/* flow_id */
#include <netinet/ip_dummynet.h>

#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <sys/sysctl.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <sys/queue.h>
#include <sys/hash.h>

#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>

#include <netpfil/ipfw/dn_aqm.h>
#include <netpfil/ipfw/dn_aqm_codel.h>
#include <netpfil/ipfw/dn_sched.h>
#include <netpfil/ipfw/dn_sched_fq_codel.h>
#include <netpfil/ipfw/dn_sched_fq_codel_helper.h>

#else
#include <dn_test.h>
#endif

/* NOTE: The fq_codel module reimplements the CoDel AQM functions
 * because fq_codel uses a different flow (sub-queue) structure, and
 * dn_queue carries many variables that a flow (sub-queue) does not
 * need, so we avoid the extra overhead (88 bytes vs 208 bytes).
 * In addition, the CoDel functions here manage the statistics of the
 * sub-queues as well as of the main queue.
 */

#define DN_SCHED_FQ_CODEL 6

static struct dn_alg fq_codel_desc;

/* Default fq_codel parameters, including the embedded CoDel parameters */
struct dn_sch_fq_codel_parms
fq_codel_sysctl = {{5000 * AQM_TIME_1US, 100000 * AQM_TIME_1US,
	CODEL_ECN_ENABLED}, 1024, 10240, 1514};
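/*
 * The positional initializer above sets, in order:
 *   CoDel: target = 5 ms, interval = 100 ms, flags = CODEL_ECN_ENABLED;
 *   FQ: flows_cnt = 1024, limit = 10240 packets, quantum = 1514 bytes
 * (matching the flows, limit and quantum sysctl defaults declared below).
 */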

static int
fqcodel_sysctl_interval_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	value = fq_codel_sysctl.ccfg.interval;
	value /= AQM_TIME_1US;
	error = sysctl_handle_long(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 1 || value > 100 * AQM_TIME_1S)
		return (EINVAL);
	fq_codel_sysctl.ccfg.interval = value * AQM_TIME_1US;

	return (0);
}

static int
fqcodel_sysctl_target_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	value = fq_codel_sysctl.ccfg.target;
	value /= AQM_TIME_1US;
	error = sysctl_handle_long(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 1 || value > 5 * AQM_TIME_1S)
		return (EINVAL);
	fq_codel_sysctl.ccfg.target = value * AQM_TIME_1US;

	return (0);
}

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
SYSCTL_DECL(_net_inet_ip_dummynet);
static SYSCTL_NODE(_net_inet_ip_dummynet, OID_AUTO, fqcodel,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "FQ_CODEL");

#ifdef SYSCTL_NODE

SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, target,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, fqcodel_sysctl_target_handler, "L",
    "FQ_CoDel target in microseconds");
SYSCTL_PROC(_net_inet_ip_dummynet_fqcodel, OID_AUTO, interval,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, fqcodel_sysctl_interval_handler, "L",
    "FQ_CoDel interval in microseconds");

SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, quantum,
    CTLFLAG_RW, &fq_codel_sysctl.quantum, 1514, "FQ_CoDel quantum");
SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, flows,
    CTLFLAG_RW, &fq_codel_sysctl.flows_cnt, 1024,
    "Number of queues for FQ_CoDel");
SYSCTL_UINT(_net_inet_ip_dummynet_fqcodel, OID_AUTO, limit,
    CTLFLAG_RW, &fq_codel_sysctl.limit, 10240,
    "FQ_CoDel queue size limit");
#endif
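
/*
 * The knobs above live under net.inet.ip.dummynet.fqcodel and can be read
 * or changed at runtime with sysctl(8); for example (illustrative values,
 * target/interval are in microseconds):
 *   sysctl net.inet.ip.dummynet.fqcodel.target=5000
 *   sysctl net.inet.ip.dummynet.fqcodel.flows=1024
 */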

/* Drop a packet from the head of the CoDel queue */
static void
codel_drop_head(struct fq_codel_flow *q, struct fq_codel_si *si)
{
	struct mbuf *m = q->mq.head;

	if (m == NULL)
		return;
	q->mq.head = m->m_nextpkt;

	fq_update_stats(q, si, -m->m_pkthdr.len, 1);

	if (si->main_q.ni.length == 0) /* queue is now idle */
		si->main_q.q_time = V_dn_cfg.curr_time;

	FREE_PKT(m);
}

/* Enqueue packet 'm' into queue 'q', adding a timestamp to the packet.
 * Return 1 when unable to add the timestamp, otherwise return 0.
 */
static int
codel_enqueue(struct fq_codel_flow *q, struct mbuf *m, struct fq_codel_si *si)
{
	uint64_t len;

	len = m->m_pkthdr.len;
	/* track the maximum packet size seen on this sub-queue */
	if (len > q->cst.maxpkt_size)
		q->cst.maxpkt_size = len;

	/* Add the enqueue timestamp to the mbuf as an m_tag */
	struct m_tag *mtag;
	mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL);
	if (mtag == NULL)
		mtag = m_tag_alloc(MTAG_ABI_COMPAT, DN_AQM_MTAG_TS,
		    sizeof(aqm_time_t), M_NOWAIT);
	if (mtag == NULL)
		goto drop;
	*(aqm_time_t *)(mtag + 1) = AQM_UNOW;
	m_tag_prepend(m, mtag);

	if (m->m_pkthdr.rcvif != NULL)
		m_rcvif_serialize(m);

	mq_append(&q->mq, m);
	fq_update_stats(q, si, len, 0);
	return 0;

drop:
	fq_update_stats(q, si, len, 1);
	m_freem(m);
	return 1;
}
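
/*
 * Note: the DN_AQM_MTAG_TS tag attached above records the enqueue time;
 * the CoDel dequeue path (fqc_codel_dequeue(), called from
 * fq_codel_dequeue() below) reads it to compute the packet's sojourn
 * time, which drives CoDel's drop/mark decisions.
 */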

/*
 * Classify a packet into a sub-queue (flow) number using the Jenkins hash.
 * The hash input is the protocol number, the per-instance perturbation,
 * source and destination IP addresses, and source and destination ports
 * (ports are zeroed for protocols other than TCP and UDP).
 * Return: the queue number.
 */
static inline int
fq_codel_classify_flow(struct mbuf *m, uint16_t fcount, struct fq_codel_si *si)
{
	struct ip *ip;
	struct tcphdr *th;
	struct udphdr *uh;
	uint8_t tuple[41];
	uint16_t hash = 0;

	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);
//#ifdef INET6
	struct ip6_hdr *ip6;
	int isip6;
	isip6 = (ip->ip_v == 6);

	if (isip6) {
		ip6 = (struct ip6_hdr *)ip;
		*((uint8_t *) &tuple[0]) = ip6->ip6_nxt;
		*((uint32_t *) &tuple[1]) = si->perturbation;
		memcpy(&tuple[5], ip6->ip6_src.s6_addr, 16);
		memcpy(&tuple[21], ip6->ip6_dst.s6_addr, 16);

		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			th = (struct tcphdr *)(ip6 + 1);
			*((uint16_t *) &tuple[37]) = th->th_dport;
			*((uint16_t *) &tuple[39]) = th->th_sport;
			break;

		case IPPROTO_UDP:
			uh = (struct udphdr *)(ip6 + 1);
			*((uint16_t *) &tuple[37]) = uh->uh_dport;
			*((uint16_t *) &tuple[39]) = uh->uh_sport;
			break;
		default:
			memset(&tuple[37], 0, 4);
		}

		hash = jenkins_hash(tuple, 41, HASHINIT) % fcount;
		return hash;
	}
//#endif

	/* IPv4 */
	*((uint8_t *) &tuple[0]) = ip->ip_p;
	*((uint32_t *) &tuple[1]) = si->perturbation;
	*((uint32_t *) &tuple[5]) = ip->ip_src.s_addr;
	*((uint32_t *) &tuple[9]) = ip->ip_dst.s_addr;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		th = (struct tcphdr *)(ip + 1);
		*((uint16_t *) &tuple[13]) = th->th_dport;
		*((uint16_t *) &tuple[15]) = th->th_sport;
		break;

	case IPPROTO_UDP:
		uh = (struct udphdr *)(ip + 1);
		*((uint16_t *) &tuple[13]) = uh->uh_dport;
		*((uint16_t *) &tuple[15]) = uh->uh_sport;
		break;
	default:
		memset(&tuple[13], 0, 4);
	}
	hash = jenkins_hash(tuple, 17, HASHINIT) % fcount;

	return hash;
}

/*
 * Enqueue a packet into the appropriate sub-queue according to the
 * FQ_CODEL algorithm. Return 0 on success; return 1 when a packet is
 * dropped (either the incoming packet or, when the total limit is
 * exceeded, a packet taken from the head of the longest sub-queue).
 */
static int
fq_codel_enqueue(struct dn_sch_inst *_si, struct dn_queue *_q,
	struct mbuf *m)
{
	struct fq_codel_si *si;
	struct fq_codel_schk *schk;
	struct dn_sch_fq_codel_parms *param;
	struct dn_queue *mainq;
	int idx, drop, i, maxidx;

	mainq = (struct dn_queue *)(_si + 1);
	si = (struct fq_codel_si *)_si;
	schk = (struct fq_codel_schk *)(si->_si.sched + 1);
	param = &schk->cfg;

	/* classify the packet into a sub-queue (flow) number */
	idx = fq_codel_classify_flow(m, param->flows_cnt, si);
	/* Enqueue the packet into the selected sub-queue using CoDel AQM.
	 * Note: codel_enqueue() returns 1 only when it is unable to add
	 * the timestamp to the packet (it performs no limit check).
	 */
	drop = codel_enqueue(&si->flows[idx], m, si);

	/* CoDel was unable to timestamp the packet */
	if (drop)
		return 1;

	/* If the flow (sub-queue) is not active, initialize and activate
	 * it and add it to the tail of the new flows list.
	 */
	if (!si->flows[idx].active) {
		STAILQ_INSERT_TAIL(&si->newflows, &si->flows[idx], flowchain);
		si->flows[idx].deficit = param->quantum;
		si->flows[idx].cst.dropping = false;
		si->flows[idx].cst.first_above_time = 0;
		si->flows[idx].active = 1;
		//D("activate %d",idx);
	}

	/* Check the limit over all sub-queues and, if exceeded, remove a
	 * packet from the head of the largest one.
	 */
	if (mainq->ni.length > schk->cfg.limit) {
		D("over limit");
		/* find the first active flow */
		for (maxidx = 0; maxidx < schk->cfg.flows_cnt; maxidx++)
			if (si->flows[maxidx].active)
				break;
		if (maxidx < schk->cfg.flows_cnt) {
			/* find the largest sub-queue */
			for (i = maxidx + 1; i < schk->cfg.flows_cnt; i++)
				if (si->flows[i].active &&
				    si->flows[i].stats.length >
				    si->flows[maxidx].stats.length)
					maxidx = i;
			codel_drop_head(&si->flows[maxidx], si);
			D("maxidx = %d", maxidx);
			drop = 1;
		}
	}

	return drop;
}

/*
 * Dequeue a packet from the appropriate sub-queue according to the
 * FQ_CODEL algorithm: sub-queues on the new flows list are served before
 * those on the old flows list, using deficit round-robin with 'quantum'
 * as the deficit increment.
 */
static struct mbuf *
fq_codel_dequeue(struct dn_sch_inst *_si)
{
	struct fq_codel_si *si;
	struct fq_codel_schk *schk;
	struct dn_sch_fq_codel_parms *param;
	struct fq_codel_flow *f;
	struct mbuf *mbuf;
	struct fq_codel_list *fq_codel_flowlist;

	si = (struct fq_codel_si *)_si;
	schk = (struct fq_codel_schk *)(si->_si.sched + 1);
	param = &schk->cfg;

	do {
		/* select a list to start with */
		if (STAILQ_EMPTY(&si->newflows))
			fq_codel_flowlist = &si->oldflows;
		else
			fq_codel_flowlist = &si->newflows;

		/* both the new and old flow lists are empty, return NULL */
		if (STAILQ_EMPTY(fq_codel_flowlist))
			return NULL;

		f = STAILQ_FIRST(fq_codel_flowlist);
		while (f != NULL) {
			/* If the flow (sub-queue) has a negative deficit,
			 * increase its deficit by quantum, move the flow to
			 * the tail of the old flows list and try another
			 * flow. Otherwise, use this flow for dequeueing.
			 */
			if (f->deficit < 0) {
				f->deficit += param->quantum;
				STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
				STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain);
			} else
				break;

			f = STAILQ_FIRST(fq_codel_flowlist);
		}

		/* the selected flow list became empty, start over */
		if (STAILQ_EMPTY(fq_codel_flowlist))
			continue;

		/* dequeue a packet from the selected flow */
		mbuf = fqc_codel_dequeue(f, si);

		/* CoDel did not return a packet */
		if (!mbuf) {
			/* If the selected flow belongs to the new flows
			 * list, move it to the tail of the old flows list.
			 * Otherwise, deactivate it and remove it from the
			 * old flows list.
			 */
			if (fq_codel_flowlist == &si->newflows) {
				STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
				STAILQ_INSERT_TAIL(&si->oldflows, f, flowchain);
			} else {
				f->active = 0;
				STAILQ_REMOVE_HEAD(fq_codel_flowlist, flowchain);
			}
			/* start again */
			continue;
		}

		/* we have a packet to return; update the flow deficit and
		 * return the packet */
		f->deficit -= mbuf->m_pkthdr.len;
		return mbuf;

	} while (1);

	/* not reached */
	return NULL;
}

/*
 * Initialize an fq_codel scheduler instance and allocate memory for the
 * flows (sub-queues) array.
 */
static int
fq_codel_new_sched(struct dn_sch_inst *_si)
{
	struct fq_codel_si *si;
	struct dn_queue *q;
	struct fq_codel_schk *schk;
	int i;

	si = (struct fq_codel_si *)_si;
	schk = (struct fq_codel_schk *)(_si->sched + 1);

	if (si->flows) {
		D("si already configured!");
		return 0;
	}

	/* init the main queue */
	q = &si->main_q;
	set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q));
	q->_si = _si;
	q->fs = _si->sched->fs;

	/* allocate memory for the flows array */
	si->flows = mallocarray(schk->cfg.flows_cnt,
	    sizeof(struct fq_codel_flow), M_DUMMYNET, M_NOWAIT | M_ZERO);
	if (si->flows == NULL) {
		D("cannot allocate memory for fq_codel flows");
		return ENOMEM;
	}

	/* init the perturbation for this si */
	si->perturbation = random();

	/* init the old and new flows lists */
	STAILQ_INIT(&si->newflows);
	STAILQ_INIT(&si->oldflows);

	/* init the flows (sub-queues) */
	for (i = 0; i < schk->cfg.flows_cnt; i++) {
		/* init codel */
		si->flows[i].cst.maxpkt_size = 500;
	}

	fq_codel_desc.ref_count++;
	return 0;
}

/*
 * Free fq_codel scheduler instance.
 */
static int
fq_codel_free_sched(struct dn_sch_inst *_si)
{
	struct fq_codel_si *si = (struct fq_codel_si *)_si;

	/* free the flows array */
	free(si->flows, M_DUMMYNET);
	si->flows = NULL;
	fq_codel_desc.ref_count--;

	return 0;
}

/*
 * Configure the fq_codel scheduler.
 * The configuration for the scheduler is passed from userland.
 */
static int
fq_codel_config(struct dn_schk *_schk)
{
	struct fq_codel_schk *schk;
	struct dn_extra_parms *ep;
	struct dn_sch_fq_codel_parms *fqc_cfg;

	schk = (struct fq_codel_schk *)(_schk + 1);
	ep = (struct dn_extra_parms *) _schk->cfg;

	/* The par array contains the fq_codel configuration as follows:
	 * CoDel: 0 - target, 1 - interval, 2 - flags
	 * FQ_CODEL: 3 - quantum, 4 - limit, 5 - flows
	 * A negative value selects the default value.
	 */
	if (ep && ep->oid.len == sizeof(*ep) &&
	    ep->oid.subtype == DN_SCH_PARAMS) {
		fqc_cfg = &schk->cfg;
		if (ep->par[0] < 0)
			fqc_cfg->ccfg.target = fq_codel_sysctl.ccfg.target;
		else
			fqc_cfg->ccfg.target = ep->par[0] * AQM_TIME_1US;

		if (ep->par[1] < 0)
			fqc_cfg->ccfg.interval = fq_codel_sysctl.ccfg.interval;
		else
			fqc_cfg->ccfg.interval = ep->par[1] * AQM_TIME_1US;

		if (ep->par[2] < 0)
			fqc_cfg->ccfg.flags = 0;
		else
			fqc_cfg->ccfg.flags = ep->par[2];

		/* FQ configuration */
		if (ep->par[3] < 0)
			fqc_cfg->quantum = fq_codel_sysctl.quantum;
		else
			fqc_cfg->quantum = ep->par[3];

		if (ep->par[4] < 0)
			fqc_cfg->limit = fq_codel_sysctl.limit;
		else
			fqc_cfg->limit = ep->par[4];

		if (ep->par[5] < 0)
			fqc_cfg->flows_cnt = fq_codel_sysctl.flows_cnt;
		else
			fqc_cfg->flows_cnt = ep->par[5];

		/* bound the configuration values */
		fqc_cfg->ccfg.target = BOUND_VAR(fqc_cfg->ccfg.target, 1,
		    5 * AQM_TIME_1S);
		fqc_cfg->ccfg.interval = BOUND_VAR(fqc_cfg->ccfg.interval, 1,
		    100 * AQM_TIME_1S);

		fqc_cfg->quantum = BOUND_VAR(fqc_cfg->quantum, 1, 9000);
		fqc_cfg->limit = BOUND_VAR(fqc_cfg->limit, 1, 20480);
		fqc_cfg->flows_cnt = BOUND_VAR(fqc_cfg->flows_cnt, 1, 65536);
	} else
		return 1;

	return 0;
}

/*
 * Return the fq_codel scheduler configuration.
 * The configuration for the scheduler is passed to userland.
 */
static int
fq_codel_getconfig(struct dn_schk *_schk, struct dn_extra_parms *ep)
{
	struct fq_codel_schk *schk = (struct fq_codel_schk *)(_schk + 1);
	struct dn_sch_fq_codel_parms *fqc_cfg;

	fqc_cfg = &schk->cfg;

	strcpy(ep->name, fq_codel_desc.name);
	ep->par[0] = fqc_cfg->ccfg.target / AQM_TIME_1US;
	ep->par[1] = fqc_cfg->ccfg.interval / AQM_TIME_1US;
	ep->par[2] = fqc_cfg->ccfg.flags;

	ep->par[3] = fqc_cfg->quantum;
	ep->par[4] = fqc_cfg->limit;
	ep->par[5] = fqc_cfg->flows_cnt;

	return 0;
}

/*
 * fq_codel scheduler descriptor.
 * It contains the type of the scheduler, its name, the size of the extra
 * data structures, and the function pointers.
 */
static struct dn_alg fq_codel_desc = {
	_SI( .type = ) DN_SCHED_FQ_CODEL,
	_SI( .name = ) "FQ_CODEL",
	_SI( .flags = ) 0,

	_SI( .schk_datalen = ) sizeof(struct fq_codel_schk),
	_SI( .si_datalen = ) sizeof(struct fq_codel_si) -
	    sizeof(struct dn_sch_inst),
	_SI( .q_datalen = ) 0,

	_SI( .enqueue = ) fq_codel_enqueue,
	_SI( .dequeue = ) fq_codel_dequeue,
	_SI( .config = ) fq_codel_config,	/* new sched, i.e. "sched X config ..." */
	_SI( .destroy = ) NULL,			/* "sched X delete" */
	_SI( .new_sched = ) fq_codel_new_sched,	/* new sched instance */
	_SI( .free_sched = ) fq_codel_free_sched, /* delete sched instance */
	_SI( .new_fsk = ) NULL,
	_SI( .free_fsk = ) NULL,
	_SI( .new_queue = ) NULL,
	_SI( .free_queue = ) NULL,
	_SI( .getconfig = ) fq_codel_getconfig,
	_SI( .ref_count = ) 0
};

DECLARE_DNSCHED_MODULE(dn_fq_codel, &fq_codel_desc);
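
/*
 * From userland this scheduler is selected by name through dummynet, e.g.
 * (illustrative; see ipfw(8) for the authoritative syntax and option names):
 *   ipfw sched 1 config type fq_codel target 5ms interval 100ms \
 *       quantum 1514 limit 10240 flows 1024 ecn
 */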