1 /*
2 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3 * Portions Copyright (c) 2000 Akamba Corp.
4 * All rights reserved
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD$
28 */
29
30 #if !defined(KLD_MODULE)
31 #include "opt_ipfw.h" /* for IPFW2 definition */
32 #endif
33
34 #define DEB(x)
35 #define DDB(x) x
36
37 /*
38 * This module implements IP dummynet, a bandwidth limiter/delay emulator
39 * used in conjunction with the ipfw package.
40 * Description of the data structures used is in ip_dummynet.h
41 * Here you mainly find the following blocks of code:
42 * + variable declarations;
43 * + heap management functions;
44 * + scheduler and dummynet functions;
45 * + configuration and initialization.
46 *
47 * NOTA BENE: critical sections are protected by splimp()/splx()
48 * pairs. One would think that splnet() would suffice, as it does for
49 * most of the netinet code, but it does not because, when used with
50 * bridging, dummynet is invoked at splimp().
51 *
52 * Most important Changes:
53 *
54 * 011004: KLDable
55 * 010124: Fixed WF2Q behaviour
56 * 010122: Fixed spl protection.
57 * 000601: WF2Q support
58 * 000106: large rewrite, use heaps to handle very many pipes.
59 * 980513: initial release
60 *
61 * include files marked with XXX are probably not needed
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/kernel.h>
69 #include <sys/module.h>
70 #include <sys/proc.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/time.h>
74 #include <sys/sysctl.h>
75 #include <net/if.h>
76 #include <net/route.h>
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #include <netinet/ip_fw.h>
82 #include <netinet/ip_dummynet.h>
83 #include <netinet/ip_var.h>
84
85 #include <netinet/if_ether.h> /* for struct arpcom */
86 #include <net/bridge.h>
87
88 /*
89 * We keep a private variable for the simulation time, but we could
90 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
91 */
92 static dn_key curr_time = 0 ; /* current simulation time */
93
94 static int dn_hash_size = 64 ; /* default hash size */
95
96 /* statistics on number of queue searches and search steps */
97 static int searches, search_steps ;
98 static int pipe_expire = 1 ; /* expire queue if empty */
99 static int dn_max_ratio = 16 ; /* max queues/buckets ratio */
100
101 static int red_lookup_depth = 256; /* RED - default lookup table depth */
102 static int red_avg_pkt_size = 512; /* RED - default medium packet size */
103 static int red_max_pkt_size = 1500; /* RED - default max packet size */
104
105 /*
106 * Three heaps contain queues and pipes that the scheduler handles:
107 *
108 * ready_heap contains all dn_flow_queue related to fixed-rate pipes,
109 * keyed on the finish time of the head packet.
110 * wfq_ready_heap contains the pipes associated with WF2Q flows, keyed
111 * on the time at which they will have accumulated enough credit.
112 * extract_heap contains pipes associated with delay lines, keyed on
113 * the delivery time of the head packet.
114 */
115
116 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
117
118 static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;
119
120 static int heap_init(struct dn_heap *h, int size) ;
121 static int heap_insert (struct dn_heap *h, dn_key key1, void *p);
122 static void heap_extract(struct dn_heap *h, void *obj);
123
124 static void transmit_event(struct dn_pipe *pipe);
125 static void ready_event(struct dn_flow_queue *q);
126
127 static struct dn_pipe *all_pipes = NULL ; /* list of all pipes */
128 static struct dn_flow_set *all_flow_sets = NULL ;/* list of all flow_sets */
129
130 static struct callout_handle dn_timeout;
131
132 #ifdef SYSCTL_NODE
133 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
134 CTLFLAG_RW, 0, "Dummynet");
135 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
136 CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
137 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time,
138 CTLFLAG_RD, &curr_time, 0, "Current tick");
139 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
140 CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
141 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
142 CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
143 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
144 CTLFLAG_RD, &searches, 0, "Number of queue searches");
145 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
146 CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
147 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
148 CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
149 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
150 CTLFLAG_RW, &dn_max_ratio, 0,
151 "Max ratio between dynamic queues and buckets");
152 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
153 CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
154 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
155 CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
156 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
157 CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
158 #endif
159
160 static int config_pipe(struct dn_pipe *p);
161 static int ip_dn_ctl(struct sockopt *sopt);
162
163 static void rt_unref(struct rtentry *);
164 static void dummynet(void *);
165 static void dummynet_flush(void);
166 void dummynet_drain(void);
167 static ip_dn_io_t dummynet_io;
168 static void dn_rule_delete(void *);
169
170 int if_tx_rdy(struct ifnet *ifp);
171
172 static void
173 rt_unref(struct rtentry *rt)
174 {
175 if (rt == NULL)
176 return ;
177 if (rt->rt_refcnt <= 0)
178 printf("dummynet: warning, refcnt now %ld, decreasing\n",
179 rt->rt_refcnt);
180 RTFREE(rt);
181 }
182
183 /*
184 * Heap management functions.
185 *
186 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
187 * Some macros help finding parent/children so we can optimize them.
188 *
189 * heap_init() is called to expand the heap when needed.
190 * Increment size in blocks of 16 entries.
191 * XXX failure to allocate a new element is a pretty bad failure
192 * as we basically stall a whole queue forever!!
193 * Returns 1 on error, 0 on success
194 */
195 #define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
196 #define HEAP_LEFT(x) ( 2*(x) + 1 )
197 #define HEAP_IS_LEFT(x) ( (x) & 1 )
198 #define HEAP_RIGHT(x) ( 2*(x) + 2 )
199 #define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
200 #define HEAP_INCREMENT 15
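/*
 * A quick sanity check of the index arithmetic above: the children of
 * node 3 are HEAP_LEFT(3) == 7 and HEAP_RIGHT(3) == 8, both of which map
 * back via HEAP_FATHER() to 3, and HEAP_IS_LEFT(7) is true. With
 * HEAP_INCREMENT == 15, heap_init() below rounds a request for 20 slots
 * up to (20 + 15) & ~15 == 32.
 */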
201
202 static int
203 heap_init(struct dn_heap *h, int new_size)
204 {
205 struct dn_heap_entry *p;
206
207 if (h->size >= new_size ) {
208 printf("dummynet: heap_init, Bogus call, have %d want %d\n",
209 h->size, new_size);
210 return 0 ;
211 }
212 new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
213 p = malloc(new_size * sizeof(*p), M_DUMMYNET, M_NOWAIT);
214 if (p == NULL) {
215 printf("dummynet: heap_init, resize %d failed\n", new_size );
216 return 1 ; /* error */
217 }
218 if (h->size > 0) {
219 bcopy(h->p, p, h->size * sizeof(*p) );
220 free(h->p, M_DUMMYNET);
221 }
222 h->p = p ;
223 h->size = new_size ;
224 return 0 ;
225 }
226
227 /*
228 * Insert an element into the heap. Normally p != NULL: we insert p at
229 * a new position and bubble up. If p == NULL, then the element is
230 * already in place, and key1 is the position from which to start the
231 * bubble-up.
232 * Returns 1 on failure (cannot allocate new heap entry)
233 *
234 * If offset > 0 the position (index, int) of the element in the heap is
235 * also stored in the element itself at the given offset in bytes.
236 */
237 #define SET_OFFSET(heap, node) \
238 if (heap->offset > 0) \
239 *((int *)((char *)(heap->p[node].object) + heap->offset)) = node ;
240 /*
241 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
242 */
243 #define RESET_OFFSET(heap, node) \
244 if (heap->offset > 0) \
245 *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
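/*
 * In this file only the per-pipe idle_heap is configured with a non-zero
 * offset (config_pipe() points it at dn_flow_queue.heap_pos), so it is
 * the only heap from which heap_extract() can remove a specific object
 * from the middle.
 */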
246 static int
247 heap_insert(struct dn_heap *h, dn_key key1, void *p)
248 {
249 int son = h->elements ;
250
251 if (p == NULL) /* data already there, set starting point */
252 son = key1 ;
253 else { /* insert new element at the end, possibly resize */
254 son = h->elements ;
255 if (son == h->size) /* need resize... */
256 if (heap_init(h, h->elements+1) )
257 return 1 ; /* failure... */
258 h->p[son].object = p ;
259 h->p[son].key = key1 ;
260 h->elements++ ;
261 }
262 while (son > 0) { /* bubble up */
263 int father = HEAP_FATHER(son) ;
264 struct dn_heap_entry tmp ;
265
266 if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
267 break ; /* found right position */
268 /* son smaller than father, swap and repeat */
269 HEAP_SWAP(h->p[son], h->p[father], tmp) ;
270 SET_OFFSET(h, son);
271 son = father ;
272 }
273 SET_OFFSET(h, son);
274 return 0 ;
275 }
276
277 /*
278 * remove top element from heap, or obj if obj != NULL
279 */
280 static void
281 heap_extract(struct dn_heap *h, void *obj)
282 {
283 int child, father, max = h->elements - 1 ;
284
285 if (max < 0) {
286 printf("dummynet: warning, extract from empty heap 0x%p\n", h);
287 return ;
288 }
289 father = 0 ; /* default: move up smallest child */
290 if (obj != NULL) { /* extract specific element, index is at offset */
291 if (h->offset <= 0)
292 panic("dummynet: heap_extract from middle not supported on this heap!!!\n");
293 father = *((int *)((char *)obj + h->offset)) ;
294 if (father < 0 || father >= h->elements) {
295 printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
296 father, h->elements);
297 panic("dummynet: heap_extract");
298 }
299 }
300 RESET_OFFSET(h, father);
301 child = HEAP_LEFT(father) ; /* left child */
302 while (child <= max) { /* valid entry */
303 if (child != max && DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
304 child = child+1 ; /* take right child, otherwise left */
305 h->p[father] = h->p[child] ;
306 SET_OFFSET(h, father);
307 father = child ;
308 child = HEAP_LEFT(child) ; /* left child for next loop */
309 }
310 h->elements-- ;
311 if (father != max) {
312 /*
313 * Fill hole with last entry and bubble up, reusing the insert code
314 */
315 h->p[father] = h->p[max] ;
316 heap_insert(h, father, NULL); /* this one cannot fail */
317 }
318 }
319
320 #if 0
321 /*
322 * change object position and update references
323 * XXX this one is never used!
324 */
325 static void
326 heap_move(struct dn_heap *h, dn_key new_key, void *object)
327 {
328 int temp;
329 int i ;
330 int max = h->elements-1 ;
331 struct dn_heap_entry buf ;
332
333 if (h->offset <= 0)
334 panic("cannot move items on this heap");
335
336 i = *((int *)((char *)object + h->offset));
337 if (DN_KEY_LT(new_key, h->p[i].key) ) { /* must move up */
338 h->p[i].key = new_key ;
339 for (; i>0 && DN_KEY_LT(new_key, h->p[(temp = HEAP_FATHER(i))].key) ;
340 i = temp ) { /* bubble up */
341 HEAP_SWAP(h->p[i], h->p[temp], buf) ;
342 SET_OFFSET(h, i);
343 }
344 } else { /* must move down */
345 h->p[i].key = new_key ;
346 while ( (temp = HEAP_LEFT(i)) <= max ) { /* found left child */
347 if ((temp != max) && DN_KEY_GT(h->p[temp].key, h->p[temp+1].key))
348 temp++ ; /* select child with min key */
349 if (DN_KEY_GT(new_key, h->p[temp].key)) { /* go down */
350 HEAP_SWAP(h->p[i], h->p[temp], buf) ;
351 SET_OFFSET(h, i);
352 } else
353 break ;
354 i = temp ;
355 }
356 }
357 SET_OFFSET(h, i);
358 }
359 #endif /* heap_move, unused */
360
361 /*
362 * heapify() will reorganize data inside an array to maintain the
363 * heap property. It is needed when we delete a bunch of entries.
364 */
365 static void
366 heapify(struct dn_heap *h)
367 {
368 int i ;
369
370 for (i = 0 ; i < h->elements ; i++ )
371 heap_insert(h, i , NULL) ;
372 }
373
374 /*
375 * cleanup the heap and free data structure
376 */
377 static void
378 heap_free(struct dn_heap *h)
379 {
380 if (h->size >0 )
381 free(h->p, M_DUMMYNET);
382 bzero(h, sizeof(*h) );
383 }
384
385 /*
386 * --- end of heap management functions ---
387 */
388
389 /*
390 * Scheduler functions:
391 *
392 * transmit_event() is called when the delay-line needs to enter
393 * the scheduler, either because of existing pkts getting ready,
394 * or new packets entering the queue. The event handled is the delivery
395 * time of the packet.
396 *
397 * ready_event() does something similar with fixed-rate queues, and the
398 * event handled is the finish time of the head pkt.
399 *
400 * wfq_ready_event() does something similar with WF2Q queues, and the
401 * event handled is the start time of the head pkt.
402 *
403 * In all cases, we make sure that the data structures are consistent
404 * before passing pkts out, because this might trigger recursive
405 * invocations of the procedures.
406 */
407 static void
408 transmit_event(struct dn_pipe *pipe)
409 {
410 struct dn_pkt *pkt ;
411
412 while ( (pkt = pipe->head) && DN_KEY_LEQ(pkt->output_time, curr_time) ) {
413 /*
414 * first unlink, then call procedures, since ip_input() can invoke
415 * ip_output() and vice versa, thus causing nested calls
416 */
417 pipe->head = DN_NEXT(pkt) ;
418
419 /*
420 * The actual mbuf is preceded by a struct dn_pkt, resembling an mbuf
421 * (NOT A REAL one, just a small block of malloc'ed memory) with
422 * m_type = MT_TAG, m_flags = PACKET_TAG_DUMMYNET
423 * dn_m (m_next) = actual mbuf to be processed by ip_input/output
424 * and some other fields.
425 * The block IS FREED HERE because it contains parameters passed
426 * to the called routine.
427 */
428 switch (pkt->dn_dir) {
429 case DN_TO_IP_OUT:
430 (void)ip_output((struct mbuf *)pkt, NULL, NULL, 0, NULL, NULL);
431 rt_unref (pkt->ro.ro_rt) ;
432 break ;
433
434 case DN_TO_IP_IN :
435 ip_input((struct mbuf *)pkt) ;
436 break ;
437
438 case DN_TO_BDG_FWD :
439 if (!BDG_LOADED) {
440 /* somebody unloaded the bridge module. Drop pkt */
441 printf("dummynet: dropping bridged packet trapped in pipe\n");
442 m_freem(pkt->dn_m);
443 break;
444 } /* fallthrough */
445 case DN_TO_ETH_DEMUX:
446 {
447 struct mbuf *m = (struct mbuf *)pkt ;
448 struct ether_header *eh;
449
450 if (pkt->dn_m->m_len < ETHER_HDR_LEN &&
451 (pkt->dn_m = m_pullup(pkt->dn_m, ETHER_HDR_LEN)) == NULL) {
452 printf("dummynet/bridge: pullup fail, dropping pkt\n");
453 break;
454 }
455 /*
456 * same as ether_input, make eh be a pointer into the mbuf
457 */
458 eh = mtod(pkt->dn_m, struct ether_header *);
459 m_adj(pkt->dn_m, ETHER_HDR_LEN);
460 /*
461 * bdg_forward() wants a pointer to the pseudo-mbuf-header, but
462 * on return it will supply the pointer to the actual packet
463 * (originally pkt->dn_m, but could be something else now) if
464 * it has not consumed it.
465 */
466 if (pkt->dn_dir == DN_TO_BDG_FWD) {
467 m = bdg_forward_ptr(m, eh, pkt->ifp);
468 if (m)
469 m_freem(m);
470 } else
471 ether_demux(NULL, eh, m); /* which consumes the mbuf */
472 }
473 break ;
474 case DN_TO_ETH_OUT:
475 ether_output_frame(pkt->ifp, (struct mbuf *)pkt);
476 break;
477
478 default:
479 printf("dummynet: bad switch %d!\n", pkt->dn_dir);
480 m_freem(pkt->dn_m);
481 break ;
482 }
483 free(pkt, M_DUMMYNET);
484 }
485 /* if there are leftover packets, put into the heap for next event */
486 if ( (pkt = pipe->head) )
487 heap_insert(&extract_heap, pkt->output_time, pipe ) ;
488 /* XXX should check errors on heap_insert, by draining the
489 * whole pipe p and hoping in the future we are more successful
490 */
491 }
492
493 /*
494 * the following macro computes how many ticks we have to wait
495 * before being able to transmit a packet. The credit is taken from
496 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
497 */
498 #define SET_TICKS(pkt, q, p) \
499 (pkt->dn_m->m_pkthdr.len*8*hz - (q)->numbytes + p->bandwidth - 1 ) / \
500 p->bandwidth ;
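/*
 * Worked example of the macro above (assuming hz = 1000): a 1500-byte
 * packet on a 1 Mbit/s pipe with no accumulated credit (numbytes == 0)
 * needs (1500*8*1000 + 1000000 - 1) / 1000000 = 12 ticks, i.e. 12 ms,
 * which matches 12000 bits at 1 Mbit/s.
 */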
501
502 /*
503 * extract pkt from queue, compute output time (could be now)
504 * and put into delay line (p_queue)
505 */
506 static void
507 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
508 struct dn_pipe *p, int len)
509 {
510 q->head = DN_NEXT(pkt) ;
511 q->len-- ;
512 q->len_bytes -= len ;
513
514 pkt->output_time = curr_time + p->delay ;
515
516 if (p->head == NULL)
517 p->head = pkt;
518 else
519 DN_NEXT(p->tail) = pkt;
520 p->tail = pkt;
521 DN_NEXT(p->tail) = NULL;
522 }
523
524 /*
525 * ready_event() is invoked every time the queue must enter the
526 * scheduler, either because the first packet arrives, or because
527 * a previously scheduled event fired.
528 * On invocation, drain as many pkts as possible (could be 0) and then
529 * if there are leftover packets reinsert the pkt in the scheduler.
530 */
531 static void
532 ready_event(struct dn_flow_queue *q)
533 {
534 struct dn_pkt *pkt;
535 struct dn_pipe *p = q->fs->pipe ;
536 int p_was_empty ;
537
538 if (p == NULL) {
539 printf("dummynet: ready_event- pipe is gone\n");
540 return ;
541 }
542 p_was_empty = (p->head == NULL) ;
543
544 /*
545 * schedule fixed-rate queues linked to this pipe:
546 * Account for the bw accumulated since last scheduling, then
547 * drain as many pkts as allowed by q->numbytes and move to
548 * the delay line (in p) computing output time.
549 * bandwidth==0 (no limit) means we can drain the whole queue,
550 * setting len_scaled = 0 does the job.
551 */
552 q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth;
553 while ( (pkt = q->head) != NULL ) {
554 int len = pkt->dn_m->m_pkthdr.len;
555 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
556 if (len_scaled > q->numbytes )
557 break ;
558 q->numbytes -= len_scaled ;
559 move_pkt(pkt, q, p, len);
560 }
561 /*
562 * If we have more packets queued, schedule next ready event
563 * (can only occur when bandwidth != 0, otherwise we would have
564 * flushed the whole queue in the previous loop).
565 * To this purpose we record the current time and compute how many
566 * ticks to go for the finish time of the packet.
567 */
568 if ( (pkt = q->head) != NULL ) { /* this implies bandwidth != 0 */
569 dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
570 q->sched_time = curr_time ;
571 heap_insert(&ready_heap, curr_time + t, (void *)q );
572 /* XXX should check errors on heap_insert, and drain the whole
573 * queue on error hoping next time we are luckier.
574 */
575 } else { /* RED needs to know when the queue becomes empty */
576 q->q_time = curr_time;
577 q->numbytes = 0;
578 }
579 /*
580 * If the delay line was empty call transmit_event(p) now.
581 * Otherwise, the scheduler will take care of it.
582 */
583 if (p_was_empty)
584 transmit_event(p);
585 }
586
587 /*
588 * Called when we can transmit packets on WF2Q queues. Take pkts out of
589 * the queues at their start time, and enqueue into the delay line.
590 * Packets are drained until p->numbytes < 0. As long as
591 * len_scaled >= p->numbytes, the packet goes into the delay line
592 * with a deadline p->delay. For the last packet, if p->numbytes<0,
593 * there is an additional delay.
594 */
595 static void
596 ready_event_wfq(struct dn_pipe *p)
597 {
598 int p_was_empty = (p->head == NULL) ;
599 struct dn_heap *sch = &(p->scheduler_heap);
600 struct dn_heap *neh = &(p->not_eligible_heap) ;
601
602 if (p->if_name[0] == 0) /* tx clock is simulated */
603 p->numbytes += ( curr_time - p->sched_time ) * p->bandwidth;
604 else { /* tx clock is for real, the ifq must be empty or this is a NOP */
605 if (p->ifp && p->ifp->if_snd.ifq_head != NULL)
606 return ;
607 else {
608 DEB(printf("dummynet: pipe %d ready from %s --\n",
609 p->pipe_nr, p->if_name);)
610 }
611 }
612
613 /*
614 * While we have backlogged traffic AND credit, we need to do
615 * something on the queue.
616 */
617 while ( p->numbytes >=0 && (sch->elements>0 || neh->elements >0) ) {
618 if (sch->elements > 0) { /* have some eligible pkts to send out */
619 struct dn_flow_queue *q = sch->p[0].object ;
620 struct dn_pkt *pkt = q->head;
621 struct dn_flow_set *fs = q->fs;
622 u_int64_t len = pkt->dn_m->m_pkthdr.len;
623 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
624
625 heap_extract(sch, NULL); /* remove queue from heap */
626 p->numbytes -= len_scaled ;
627 move_pkt(pkt, q, p, len);
628
629 p->V += (len<<MY_M) / p->sum ; /* update V */
630 q->S = q->F ; /* update start time */
631 if (q->len == 0) { /* Flow not backlogged any more */
632 fs->backlogged-- ;
633 heap_insert(&(p->idle_heap), q->F, q);
634 } else { /* still backlogged */
635 /*
636 * update F and position in backlogged queue, then
637 * put flow in not_eligible_heap (we will fix this later).
638 */
639 len = (q->head)->dn_m->m_pkthdr.len;
640 q->F += (len<<MY_M)/(u_int64_t) fs->weight ;
641 if (DN_KEY_LEQ(q->S, p->V))
642 heap_insert(neh, q->S, q);
643 else
644 heap_insert(sch, q->F, q);
645 }
646 }
647 /*
648 * now compute V = max(V, min(S_i)). Remember that all elements in sch
649 * have by definition S_i <= V so if sch is not empty, V is surely
650 * the max and we must not update it. Conversely, if sch is empty
651 * we only need to look at neh.
652 */
653 if (sch->elements == 0 && neh->elements > 0)
654 p->V = MAX64 ( p->V, neh->p[0].key );
655 /* move from neh to sch any packets that have become eligible */
656 while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V) ) {
657 struct dn_flow_queue *q = neh->p[0].object ;
658 heap_extract(neh, NULL);
659 heap_insert(sch, q->F, q);
660 }
661
662 if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
663 p->numbytes = -1 ; /* mark not ready for I/O */
664 break ;
665 }
666 }
667 if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0
668 && p->idle_heap.elements > 0) {
669 /*
670 * no traffic and no events scheduled. We can get rid of idle-heap.
671 */
672 int i ;
673
674 for (i = 0 ; i < p->idle_heap.elements ; i++) {
675 struct dn_flow_queue *q = p->idle_heap.p[i].object ;
676
677 q->F = 0 ;
678 q->S = q->F + 1 ;
679 }
680 p->sum = 0 ;
681 p->V = 0 ;
682 p->idle_heap.elements = 0 ;
683 }
684 /*
685 * If we are getting clocks from dummynet (not a real interface) and
686 * we are under credit, schedule the next ready event.
687 * Also fix the delivery time of the last packet.
688 */
689 if (p->if_name[0]==0 && p->numbytes < 0) { /* this implies bandwidth >0 */
690 dn_key t=0 ; /* number of ticks i have to wait */
691
692 if (p->bandwidth > 0)
693 t = ( p->bandwidth -1 - p->numbytes) / p->bandwidth ;
694 p->tail->output_time += t ;
695 p->sched_time = curr_time ;
696 heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
697 /* XXX should check errors on heap_insert, and drain the whole
698 * queue on error hoping next time we are luckier.
699 */
700 }
701 /*
702 * If the delay line was empty call transmit_event(p) now.
703 * Otherwise, the scheduler will take care of it.
704 */
705 if (p_was_empty)
706 transmit_event(p);
707 }
708
709 /*
710 * This is called once per tick, or HZ times per second. It is used to
711 * increment the current tick counter and schedule expired events.
712 */
713 static void
714 dummynet(void * __unused unused)
715 {
716 void *p ; /* generic parameter to handler */
717 struct dn_heap *h ;
718 int s ;
719 struct dn_heap *heaps[3];
720 int i;
721 struct dn_pipe *pe ;
722
723 heaps[0] = &ready_heap ; /* fixed-rate queues */
724 heaps[1] = &wfq_ready_heap ; /* wfq queues */
725 heaps[2] = &extract_heap ; /* delay line */
726 s = splimp(); /* see note on top, splnet() is not enough */
727 curr_time++ ;
728 for (i=0; i < 3 ; i++) {
729 h = heaps[i];
730 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time) ) {
731 DDB(if (h->p[0].key > curr_time)
732 printf("dummynet: warning, heap %d is %d ticks late\n",
733 i, (int)(curr_time - h->p[0].key));)
734 p = h->p[0].object ; /* store a copy before heap_extract */
735 heap_extract(h, NULL); /* need to extract before processing */
736 if (i == 0)
737 ready_event(p) ;
738 else if (i == 1) {
739 struct dn_pipe *pipe = p;
740 if (pipe->if_name[0] != '\0')
741 printf("dummynet: bad ready_event_wfq for pipe %s\n",
742 pipe->if_name);
743 else
744 ready_event_wfq(p) ;
745 } else
746 transmit_event(p);
747 }
748 }
749 /* sweep pipes trying to expire idle flow_queues */
750 for (pe = all_pipes; pe ; pe = pe->next )
751 if (pe->idle_heap.elements > 0 &&
752 DN_KEY_LT(pe->idle_heap.p[0].key, pe->V) ) {
753 struct dn_flow_queue *q = pe->idle_heap.p[0].object ;
754
755 heap_extract(&(pe->idle_heap), NULL);
756 q->S = q->F + 1 ; /* mark timestamp as invalid */
757 pe->sum -= q->fs->weight ;
758 }
759 splx(s);
760 dn_timeout = timeout(dummynet, NULL, 1);
761 }
762
763 /*
764 * called by an interface when tx_rdy occurs.
765 */
766 int
767 if_tx_rdy(struct ifnet *ifp)
768 {
769 struct dn_pipe *p;
770
771 for (p = all_pipes; p ; p = p->next )
772 if (p->ifp == ifp)
773 break ;
774 if (p == NULL) {
775 char buf[32];
776 sprintf(buf, "%s%d",ifp->if_name, ifp->if_unit);
777 for (p = all_pipes; p ; p = p->next )
778 if (!strcmp(p->if_name, buf) ) {
779 p->ifp = ifp ;
780 DEB(printf("dummynet: ++ tx rdy from %s (now found)\n", buf);)
781 break ;
782 }
783 }
784 if (p != NULL) {
785 DEB(printf("dummynet: ++ tx rdy from %s%d - qlen %d\n", ifp->if_name,
786 ifp->if_unit, ifp->if_snd.ifq_len);)
787 p->numbytes = 0 ; /* mark ready for I/O */
788 ready_event_wfq(p);
789 }
790 return 0;
791 }
792
793 /*
794 * Unconditionally expire empty queues in case of shortage.
795 * Returns the number of queues freed.
796 */
797 static int
798 expire_queues(struct dn_flow_set *fs)
799 {
800 struct dn_flow_queue *q, *prev ;
801 int i, initial_elements = fs->rq_elements ;
802
803 if (fs->last_expired == time_second)
804 return 0 ;
805 fs->last_expired = time_second ;
806 for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
807 for (prev=NULL, q = fs->rq[i] ; q != NULL ; )
808 if (q->head != NULL || q->S != q->F+1) {
809 prev = q ;
810 q = q->next ;
811 } else { /* entry is idle, expire it */
812 struct dn_flow_queue *old_q = q ;
813
814 if (prev != NULL)
815 prev->next = q = q->next ;
816 else
817 fs->rq[i] = q = q->next ;
818 fs->rq_elements-- ;
819 free(old_q, M_DUMMYNET);
820 }
821 return initial_elements - fs->rq_elements ;
822 }
823
824 /*
825 * If room, create a new queue and put at head of slot i;
826 * otherwise, create or use the default queue.
827 */
828 static struct dn_flow_queue *
829 create_queue(struct dn_flow_set *fs, int i)
830 {
831 struct dn_flow_queue *q ;
832
833 if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
834 expire_queues(fs) == 0) {
835 /*
836 * No way to get room, use or create overflow queue.
837 */
838 i = fs->rq_size ;
839 if ( fs->rq[i] != NULL )
840 return fs->rq[i] ;
841 }
842 q = malloc(sizeof(*q), M_DUMMYNET, M_NOWAIT | M_ZERO);
843 if (q == NULL) {
844 printf("dummynet: sorry, cannot allocate queue for new flow\n");
845 return NULL ;
846 }
847 q->fs = fs ;
848 q->hash_slot = i ;
849 q->next = fs->rq[i] ;
850 q->S = q->F + 1; /* hack - mark timestamp as invalid */
851 fs->rq[i] = q ;
852 fs->rq_elements++ ;
853 return q ;
854 }
855
856 /*
857 * Given a flow_set and a flow_id, find a matching queue
858 * after appropriate masking. The queue is moved to front
859 * so that further searches take less time.
860 */
861 static struct dn_flow_queue *
862 find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
863 {
864 int i = 0 ; /* we need i and q for new allocations */
865 struct dn_flow_queue *q, *prev;
866
867 if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) )
868 q = fs->rq[0] ;
869 else {
870 /* first, do the masking */
871 id->dst_ip &= fs->flow_mask.dst_ip ;
872 id->src_ip &= fs->flow_mask.src_ip ;
873 id->dst_port &= fs->flow_mask.dst_port ;
874 id->src_port &= fs->flow_mask.src_port ;
875 id->proto &= fs->flow_mask.proto ;
876 id->flags = 0 ; /* we don't care about this one */
877 /* then, hash function */
878 i = ( (id->dst_ip) & 0xffff ) ^
879 ( (id->dst_ip >> 15) & 0xffff ) ^
880 ( (id->src_ip << 1) & 0xffff ) ^
881 ( (id->src_ip >> 16 ) & 0xffff ) ^
882 (id->dst_port << 1) ^ (id->src_port) ^
883 (id->proto );
884 i = i % fs->rq_size ;
885 /* finally, scan the current list for a match */
886 searches++ ;
887 for (prev=NULL, q = fs->rq[i] ; q ; ) {
888 search_steps++;
889 if (id->dst_ip == q->id.dst_ip &&
890 id->src_ip == q->id.src_ip &&
891 id->dst_port == q->id.dst_port &&
892 id->src_port == q->id.src_port &&
893 id->proto == q->id.proto &&
894 id->flags == q->id.flags)
895 break ; /* found */
896 else if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
897 /* entry is idle and not in any heap, expire it */
898 struct dn_flow_queue *old_q = q ;
899
900 if (prev != NULL)
901 prev->next = q = q->next ;
902 else
903 fs->rq[i] = q = q->next ;
904 fs->rq_elements-- ;
905 free(old_q, M_DUMMYNET);
906 continue ;
907 }
908 prev = q ;
909 q = q->next ;
910 }
911 if (q && prev != NULL) { /* found and not in front */
912 prev->next = q->next ;
913 q->next = fs->rq[i] ;
914 fs->rq[i] = q ;
915 }
916 }
917 if (q == NULL) { /* no match, need to allocate a new entry */
918 q = create_queue(fs, i);
919 if (q != NULL)
920 q->id = *id ;
921 }
922 return q ;
923 }
924
925 static int
926 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
927 {
928 /*
929 * RED algorithm
930 *
931 * RED calculates the average queue size (avg) using a low-pass filter
932 * with an exponential weighted (w_q) moving average:
933 * avg <- (1-w_q) * avg + w_q * q_size
934 * where q_size is the queue length (measured in bytes or packets).
935 *
936 * If q_size == 0, we compute the idle time for the link, and set
937 * avg = (1 - w_q)^(idle/s)
938 * where s is the time needed for transmitting a medium-sized packet.
939 *
940 * Now, if avg < min_th the packet is enqueued.
941 * If avg > max_th the packet is dropped. Otherwise, the packet is
942 * dropped with a probability that is a function of avg.
943 *
944 */
945
946 int64_t p_b = 0;
947 /* queue in bytes or packets ? */
948 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
949
950 DEB(printf("\ndummynet: %d q: %2u ", (int) curr_time, q_size);)
951
952 /* average queue size estimation */
953 if (q_size != 0) {
954 /*
955 * queue is not empty, avg <- avg + (q_size - avg) * w_q
956 */
957 int diff = SCALE(q_size) - q->avg;
958 int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q);
959
960 q->avg += (int) v;
961 } else {
962 /*
963 * queue is empty, find how long the queue has been
964 * empty and use a lookup table for computing
965 * (1 - w_q)^(idle_time/s) where s is the time to send a
966 * (small) packet.
967 * XXX check wraps...
968 */
969 if (q->avg) {
970 u_int t = (curr_time - q->q_time) / fs->lookup_step;
971
972 q->avg = (t < fs->lookup_depth) ?
973 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
974 }
975 }
976 DEB(printf("dummynet: avg: %u ", SCALE_VAL(q->avg));)
977
978 /* should i drop ? */
979
980 if (q->avg < fs->min_th) {
981 q->count = -1;
982 return 0; /* accept packet ; */
983 }
984 if (q->avg >= fs->max_th) { /* average queue >= max threshold */
985 if (fs->flags_fs & DN_IS_GENTLE_RED) {
986 /*
987 * According to Gentle-RED, if avg is greater than max_th the
988 * packet is dropped with a probability
989 * p_b = c_3 * avg - c_4
990 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
991 */
992 p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
993 } else {
994 q->count = -1;
995 DEB(printf("dummynet: - drop"););
996 return 1 ;
997 }
998 } else if (q->avg > fs->min_th) {
999 /*
1000 * we compute p_b using the linear dropping function p_b = c_1 *
1001 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
1002 * max_p * min_th / (max_th - min_th)
1003 */
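/*
 * Ignoring the fixed-point SCALE() factors, a worked example of this
 * linear region: with min_th = 5, max_th = 15 (packets) and max_p = 0.1,
 * c_1 = 0.1/10 = 0.01 and c_2 = 0.1*5/10 = 0.05, so at avg = 10 the
 * drop probability is p_b = 0.01*10 - 0.05 = 0.05.
 */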
1004 p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2;
1005 }
1006 if (fs->flags_fs & DN_QSIZE_IS_BYTES)
1007 p_b = (p_b * len) / fs->max_pkt_size;
1008 if (++q->count == 0)
1009 q->random = random() & 0xffff;
1010 else {
1011 /*
1012 * q->count counts packets arrived since last drop, so a greater
1013 * value of q->count means a greater packet drop probability.
1014 */
1015 if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
1016 q->count = 0;
1017 DEB(printf("dummynet: - red drop");)
1018 /* after a drop we calculate a new random value */
1019 q->random = random() & 0xffff;
1020 return 1; /* drop */
1021 }
1022 }
1023 /* end of RED algorithm */
1024 return 0 ; /* accept */
1025 }
1026
1027 static __inline
1028 struct dn_flow_set *
1029 locate_flowset(int pipe_nr, struct ip_fw *rule)
1030 {
1031 #if IPFW2
1032 struct dn_flow_set *fs;
1033 ipfw_insn *cmd = rule->cmd + rule->act_ofs;
1034
1035 if (cmd->opcode == O_LOG)
1036 cmd += F_LEN(cmd);
1037 #ifdef __i386__
1038 fs = ((ipfw_insn_pipe *)cmd)->pipe_ptr;
1039 #else
1040 bcopy(& ((ipfw_insn_pipe *)cmd)->pipe_ptr, &fs, sizeof(fs));
1041 #endif
1042
1043 if (fs != NULL)
1044 return fs;
1045
1046 if (cmd->opcode == O_QUEUE)
1047 #else /* !IPFW2 */
1048 struct dn_flow_set *fs = NULL ;
1049
1050 if ( (rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_QUEUE )
1051 #endif /* !IPFW2 */
1052 for (fs=all_flow_sets; fs && fs->fs_nr != pipe_nr; fs=fs->next)
1053 ;
1054 else {
1055 struct dn_pipe *p1;
1056 for (p1 = all_pipes; p1 && p1->pipe_nr != pipe_nr; p1 = p1->next)
1057 ;
1058 if (p1 != NULL)
1059 fs = &(p1->fs) ;
1060 }
1061 /* record for the future */
1062 #if IPFW2
1063 #ifdef __i386__
1064 ((ipfw_insn_pipe *)cmd)->pipe_ptr = fs;
1065 #else
1066 bcopy(&fs, & ((ipfw_insn_pipe *)cmd)->pipe_ptr, sizeof(fs));
1067 #endif
1068 #else
1069 if (fs != NULL)
1070 rule->pipe_ptr = fs;
1071 #endif
1072 return fs ;
1073 }
1074
1075 /*
1076 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
1077 * depending on whether WF2Q or fixed bw is used.
1078 *
1079 * pipe_nr pipe or queue the packet is destined for.
1080 * dir where shall we send the packet after dummynet.
1081 * m the mbuf with the packet
1082 * ifp the 'ifp' parameter from the caller.
1083 * NULL in ip_input, destination interface in ip_output,
1084 * real_dst in bdg_forward
1085 * ro route parameter (only used in ip_output, NULL otherwise)
1086 * dst destination address, only used by ip_output
1087 * rule matching rule, in case of multiple passes
1088 * flags flags from the caller, only used in ip_output
1089 *
1090 */
1091 static int
1092 dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
1093 {
1094 struct dn_pkt *pkt;
1095 struct dn_flow_set *fs;
1096 struct dn_pipe *pipe ;
1097 u_int64_t len = m->m_pkthdr.len ;
1098 struct dn_flow_queue *q = NULL ;
1099 int s = splimp();
1100 int is_pipe;
1101 #if IPFW2
1102 ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs;
1103
1104 if (cmd->opcode == O_LOG)
1105 cmd += F_LEN(cmd);
1106 is_pipe = (cmd->opcode == O_PIPE);
1107 #else
1108 is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE;
1109 #endif
1110
1111 pipe_nr &= 0xffff ;
1112
1113 /*
1114 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
1115 */
1116 fs = locate_flowset(pipe_nr, fwa->rule);
1117 if (fs == NULL)
1118 goto dropit ; /* this queue/pipe does not exist! */
1119 pipe = fs->pipe ;
1120 if (pipe == NULL) { /* must be a queue, try to find a matching pipe */
1121 for (pipe = all_pipes; pipe && pipe->pipe_nr != fs->parent_nr;
1122 pipe = pipe->next)
1123 ;
1124 if (pipe != NULL)
1125 fs->pipe = pipe ;
1126 else {
1127 printf("dummynet: no pipe %d for queue %d, drop pkt\n",
1128 fs->parent_nr, fs->fs_nr);
1129 goto dropit ;
1130 }
1131 }
1132 q = find_queue(fs, &(fwa->f_id));
1133 if ( q == NULL )
1134 goto dropit ; /* cannot allocate queue */
1135 /*
1136 * update statistics, then check reasons to drop pkt
1137 */
1138 q->tot_bytes += len ;
1139 q->tot_pkts++ ;
1140 if ( fs->plr && random() < fs->plr )
1141 goto dropit ; /* random pkt drop */
1142 if ( fs->flags_fs & DN_QSIZE_IS_BYTES) {
1143 if (q->len_bytes > fs->qsize)
1144 goto dropit ; /* queue size overflow */
1145 } else {
1146 if (q->len >= fs->qsize)
1147 goto dropit ; /* queue count overflow */
1148 }
1149 if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) )
1150 goto dropit ;
1151
1152 /* XXX expensive to zero, see if we can remove it */
1153 pkt = (struct dn_pkt *)malloc(sizeof (*pkt), M_DUMMYNET, M_NOWAIT|M_ZERO);
1154 if ( pkt == NULL )
1155 goto dropit ; /* cannot allocate packet header */
1156 /* ok, i can handle the pkt now... */
1157 /* build and enqueue packet + parameters */
1158 pkt->hdr.mh_type = MT_TAG;
1159 pkt->hdr.mh_flags = PACKET_TAG_DUMMYNET;
1160 pkt->rule = fwa->rule ;
1161 DN_NEXT(pkt) = NULL;
1162 pkt->dn_m = m;
1163 pkt->dn_dir = dir ;
1164
1165 pkt->ifp = fwa->oif;
1166 if (dir == DN_TO_IP_OUT) {
1167 /*
1168 * We need to copy *ro because for ICMP pkts (and maybe others)
1169 * the caller passed a pointer into the stack; dst might also be
1170 * a pointer into *ro so it needs to be updated.
1171 */
1172 pkt->ro = *(fwa->ro);
1173 if (fwa->ro->ro_rt)
1174 fwa->ro->ro_rt->rt_refcnt++ ;
1175 if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) /* dst points into ro */
1176 fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst) ;
1177
1178 pkt->dn_dst = fwa->dst;
1179 pkt->flags = fwa->flags;
1180 }
1181 if (q->head == NULL)
1182 q->head = pkt;
1183 else
1184 DN_NEXT(q->tail) = pkt;
1185 q->tail = pkt;
1186 q->len++;
1187 q->len_bytes += len ;
1188
1189 if ( q->head != pkt ) /* flow was not idle, we are done */
1190 goto done;
1191 /*
1192 * If we reach this point the flow was previously idle, so we need
1193 * to schedule it. This involves different actions for fixed-rate or
1194 * WF2Q queues.
1195 */
1196 if (is_pipe) {
1197 /*
1198 * Fixed-rate queue: just insert into the ready_heap.
1199 */
1200 dn_key t = 0 ;
1201 if (pipe->bandwidth)
1202 t = SET_TICKS(pkt, q, pipe);
1203 q->sched_time = curr_time ;
1204 if (t == 0) /* must process it now */
1205 ready_event( q );
1206 else
1207 heap_insert(&ready_heap, curr_time + t , q );
1208 } else {
1209 /*
1210 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
1211 * set S to the virtual time V for the controlling pipe, and update
1212 * the sum of weights for the pipe; otherwise, remove flow from
1213 * idle_heap and set S to max(F,V).
1214 * Second, compute finish time F = S + len/weight.
1215 * Third, if pipe was idle, update V=max(S, V).
1216 * Fourth, count one more backlogged flow.
1217 */
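/*
 * Intuition for the weights (a sketch, not tied to particular values):
 * because F advances by len/weight per packet, two always-backlogged
 * flows with weights 2 and 1 sending equal-sized packets see the first
 * flow's F grow half as fast, so it is selected twice as often and gets
 * two thirds of the pipe bandwidth.
 */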
1218 if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
1219 q->S = pipe->V ;
1220 pipe->sum += fs->weight ; /* add weight of new queue */
1221 } else {
1222 heap_extract(&(pipe->idle_heap), q);
1223 q->S = MAX64(q->F, pipe->V ) ;
1224 }
1225 q->F = q->S + ( len<<MY_M )/(u_int64_t) fs->weight;
1226
1227 if (pipe->not_eligible_heap.elements == 0 &&
1228 pipe->scheduler_heap.elements == 0)
1229 pipe->V = MAX64 ( q->S, pipe->V );
1230 fs->backlogged++ ;
1231 /*
1232 * Look at eligibility. A flow is not eligible if S>V (when
1233 * this happens, it means that there is some other flow already
1234 * scheduled for the same pipe, so the scheduler_heap cannot be
1235 * empty). If the flow is not eligible we just store it in the
1236 * not_eligible_heap. Otherwise, we store in the scheduler_heap
1237 * and possibly invoke ready_event_wfq() right now if there is
1238 * leftover credit.
1239 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1240 * and for all flows in not_eligible_heap (NEH), S_i > V .
1241 * So when we need to compute max( V, min(S_i) ) forall i in SCH+NEH,
1242 * we only need to look into NEH.
1243 */
1244 if (DN_KEY_GT(q->S, pipe->V) ) { /* not eligible */
1245 if (pipe->scheduler_heap.elements == 0)
1246 printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
1247 heap_insert(&(pipe->not_eligible_heap), q->S, q);
1248 } else {
1249 heap_insert(&(pipe->scheduler_heap), q->F, q);
1250 if (pipe->numbytes >= 0) { /* pipe is idle */
1251 if (pipe->scheduler_heap.elements != 1)
1252 printf("dummynet: OUCH! pipe should have been idle!\n");
1253 DEB(printf("dummynet: waking up pipe %d at %d\n",
1254 pipe->pipe_nr, (int)(q->F >> MY_M)); )
1255 pipe->sched_time = curr_time ;
1256 ready_event_wfq(pipe);
1257 }
1258 }
1259 }
1260 done:
1261 splx(s);
1262 return 0;
1263
1264 dropit:
1265 splx(s);
1266 if (q)
1267 q->drops++ ;
1268 m_freem(m);
1269 return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
1270 }
1271
1272 /*
1273 * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT)
1274 * Doing this would probably save us the initial bzero of dn_pkt
1275 */
1276 #define DN_FREE_PKT(pkt) { \
1277 struct dn_pkt *n = pkt ; \
1278 rt_unref ( n->ro.ro_rt ) ; \
1279 m_freem(n->dn_m); \
1280 pkt = DN_NEXT(n) ; \
1281 free(n, M_DUMMYNET) ; }
1282
1283 /*
1284 * Dispose all packets and flow_queues on a flow_set.
1285 * If all=1, also remove red lookup table and other storage,
1286 * including the descriptor itself.
1287 * For the flow_set embedded in a dn_pipe, the caller MUST also clean up ready_heap...
1288 */
1289 static void
1290 purge_flow_set(struct dn_flow_set *fs, int all)
1291 {
1292 struct dn_pkt *pkt ;
1293 struct dn_flow_queue *q, *qn ;
1294 int i ;
1295
1296 for (i = 0 ; i <= fs->rq_size ; i++ ) {
1297 for (q = fs->rq[i] ; q ; q = qn ) {
1298 for (pkt = q->head ; pkt ; )
1299 DN_FREE_PKT(pkt) ;
1300 qn = q->next ;
1301 free(q, M_DUMMYNET);
1302 }
1303 fs->rq[i] = NULL ;
1304 }
1305 fs->rq_elements = 0 ;
1306 if (all) {
1307 /* RED - free lookup table */
1308 if (fs->w_q_lookup)
1309 free(fs->w_q_lookup, M_DUMMYNET);
1310 if (fs->rq)
1311 free(fs->rq, M_DUMMYNET);
1312 /* if this fs is not part of a pipe, free it */
1313 if (fs->pipe && fs != &(fs->pipe->fs) )
1314 free(fs, M_DUMMYNET);
1315 }
1316 }
1317
1318 /*
1319 * Dispose all packets queued on a pipe (not a flow_set).
1320 * Also free all resources associated with a pipe, which is about
1321 * to be deleted.
1322 */
1323 static void
1324 purge_pipe(struct dn_pipe *pipe)
1325 {
1326 struct dn_pkt *pkt ;
1327
1328 purge_flow_set( &(pipe->fs), 1 );
1329
1330 for (pkt = pipe->head ; pkt ; )
1331 DN_FREE_PKT(pkt) ;
1332
1333 heap_free( &(pipe->scheduler_heap) );
1334 heap_free( &(pipe->not_eligible_heap) );
1335 heap_free( &(pipe->idle_heap) );
1336 }
1337
1338 /*
1339 * Delete all pipes and heaps returning memory. Must also
1340 * remove references from all ipfw rules to all pipes.
1341 */
1342 static void
1343 dummynet_flush()
1344 {
1345 struct dn_pipe *curr_p, *p ;
1346 struct dn_flow_set *fs, *curr_fs;
1347 int s ;
1348
1349 s = splimp() ;
1350
1351 /* remove all references to pipes ...*/
1352 flush_pipe_ptrs(NULL);
1353 /* prevent future matches... */
1354 p = all_pipes ;
1355 all_pipes = NULL ;
1356 fs = all_flow_sets ;
1357 all_flow_sets = NULL ;
1358 /* and free heaps so we don't have unwanted events */
1359 heap_free(&ready_heap);
1360 heap_free(&wfq_ready_heap);
1361 heap_free(&extract_heap);
1362 splx(s) ;
1363 /*
1364 * Now purge all queued pkts and delete all pipes
1365 */
1366 /* scan and purge all flow_sets. */
1367 for ( ; fs ; ) {
1368 curr_fs = fs ;
1369 fs = fs->next ;
1370 purge_flow_set(curr_fs, 1);
1371 }
1372 for ( ; p ; ) {
1373 purge_pipe(p);
1374 curr_p = p ;
1375 p = p->next ;
1376 free(curr_p, M_DUMMYNET);
1377 }
1378 }
1379
1380
1381 extern struct ip_fw *ip_fw_default_rule ;
1382 static void
1383 dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
1384 {
1385 int i ;
1386 struct dn_flow_queue *q ;
1387 struct dn_pkt *pkt ;
1388
1389 for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
1390 for (q = fs->rq[i] ; q ; q = q->next )
1391 for (pkt = q->head ; pkt ; pkt = DN_NEXT(pkt) )
1392 if (pkt->rule == r)
1393 pkt->rule = ip_fw_default_rule ;
1394 }
1395 /*
1396 * When a firewall rule is deleted, scan all queues and point packets
1397 * matching that rule back at the default rule.
1398 */
1399 void
1400 dn_rule_delete(void *r)
1401 {
1402 struct dn_pipe *p ;
1403 struct dn_pkt *pkt ;
1404 struct dn_flow_set *fs ;
1405
1406 /*
1407 * If the rule references a queue (dn_flow_set), then scan
1408 * the flow set, otherwise scan pipes. Should do either, but doing
1409 * both does not harm.
1410 */
1411 for ( fs = all_flow_sets ; fs ; fs = fs->next )
1412 dn_rule_delete_fs(fs, r);
1413 for ( p = all_pipes ; p ; p = p->next ) {
1414 fs = &(p->fs) ;
1415 dn_rule_delete_fs(fs, r);
1416 for (pkt = p->head ; pkt ; pkt = DN_NEXT(pkt) )
1417 if (pkt->rule == r)
1418 pkt->rule = ip_fw_default_rule ;
1419 }
1420 }
1421
1422 /*
1423 * setup RED parameters
1424 */
1425 static int
1426 config_red(struct dn_flow_set *p, struct dn_flow_set * x)
1427 {
1428 int i;
1429
1430 x->w_q = p->w_q;
1431 x->min_th = SCALE(p->min_th);
1432 x->max_th = SCALE(p->max_th);
1433 x->max_p = p->max_p;
1434
1435 x->c_1 = p->max_p / (p->max_th - p->min_th);
1436 x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
1437 if (x->flags_fs & DN_IS_GENTLE_RED) {
1438 x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
1439 x->c_4 = (SCALE(1) - 2 * p->max_p);
1440 }
1441
1442 /* if the lookup table already exists, free it and create it again */
1443 if (x->w_q_lookup) {
1444 free(x->w_q_lookup, M_DUMMYNET);
1445 x->w_q_lookup = NULL ;
1446 }
1447 if (red_lookup_depth == 0) {
1448 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1449 free(x, M_DUMMYNET);
1450 return EINVAL;
1451 }
1452 x->lookup_depth = red_lookup_depth;
1453 x->w_q_lookup = (u_int *) malloc(x->lookup_depth * sizeof(int),
1454 M_DUMMYNET, M_NOWAIT);
1455 if (x->w_q_lookup == NULL) {
1456 printf("dummynet: sorry, cannot allocate red lookup table\n");
1457 free(x, M_DUMMYNET);
1458 return ENOSPC;
1459 }
1460
1461 /* fill the lookup table with (1 - w_q)^x */
1462 x->lookup_step = p->lookup_step ;
1463 x->lookup_weight = p->lookup_weight ;
1464 x->w_q_lookup[0] = SCALE(1) - x->w_q;
1465 for (i = 1; i < x->lookup_depth; i++)
1466 x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1467 if (red_avg_pkt_size < 1)
1468 red_avg_pkt_size = 512 ;
1469 x->avg_pkt_size = red_avg_pkt_size ;
1470 if (red_max_pkt_size < 1)
1471 red_max_pkt_size = 1500 ;
1472 x->max_pkt_size = red_max_pkt_size ;
1473 return 0 ;
1474 }
1475
1476 static int
1477 alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
1478 {
1479 if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */
1480 int l = pfs->rq_size;
1481
1482 if (l == 0)
1483 l = dn_hash_size;
1484 if (l < 4)
1485 l = 4;
1486 else if (l > DN_MAX_HASH_SIZE)
1487 l = DN_MAX_HASH_SIZE;
1488 x->rq_size = l;
1489 } else /* one is enough for null mask */
1490 x->rq_size = 1;
1491 x->rq = malloc((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
1492 M_DUMMYNET, M_NOWAIT | M_ZERO);
1493 if (x->rq == NULL) {
1494 printf("dummynet: sorry, cannot allocate queue\n");
1495 return ENOSPC;
1496 }
1497 x->rq_elements = 0;
1498 return 0 ;
1499 }
1500
1501 static void
1502 set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
1503 {
1504 x->flags_fs = src->flags_fs;
1505 x->qsize = src->qsize;
1506 x->plr = src->plr;
1507 x->flow_mask = src->flow_mask;
1508 if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1509 if (x->qsize > 1024*1024)
1510 x->qsize = 1024*1024 ;
1511 } else {
1512 if (x->qsize == 0)
1513 x->qsize = 50 ;
1514 if (x->qsize > 100)
1515 x->qsize = 50 ;
1516 }
1517 /* configuring RED */
1518 if ( x->flags_fs & DN_IS_RED )
1519 config_red(src, x) ; /* XXX should check errors */
1520 }
1521
1522 /*
1523 * setup pipe or queue parameters.
1524 */
1525
1526 static int
1527 config_pipe(struct dn_pipe *p)
1528 {
1529 int i, r, s;
1530 struct dn_flow_set *pfs = &(p->fs);
1531 struct dn_flow_queue *q;
1532
1533 /*
1534 * The config program passes parameters as follows:
1535 * bw = bits/second (0 means no limits),
1536 * delay = ms, must be translated into ticks.
1537 * qsize = slots/bytes
1538 */
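/* e.g., with hz = 100, a configured delay of 20 ms becomes 2 ticks below */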
1539 p->delay = ( p->delay * hz ) / 1000 ;
1540 /* We need either a pipe number or a flow_set number */
1541 if (p->pipe_nr == 0 && pfs->fs_nr == 0)
1542 return EINVAL ;
1543 if (p->pipe_nr != 0 && pfs->fs_nr != 0)
1544 return EINVAL ;
1545 if (p->pipe_nr != 0) { /* this is a pipe */
1546 struct dn_pipe *x, *a, *b;
1547 /* locate pipe */
1548 for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ;
1549 a = b , b = b->next) ;
1550
1551 if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
1552 x = malloc(sizeof(struct dn_pipe), M_DUMMYNET, M_NOWAIT | M_ZERO);
1553 if (x == NULL) {
1554 printf("dummynet: no memory for new pipe\n");
1555 return ENOSPC;
1556 }
1557 x->pipe_nr = p->pipe_nr;
1558 x->fs.pipe = x ;
1559 /* idle_heap is the only one from which we extract from the middle.
1560 */
1561 x->idle_heap.size = x->idle_heap.elements = 0 ;
1562 x->idle_heap.offset=OFFSET_OF(struct dn_flow_queue, heap_pos);
1563 } else {
1564 x = b;
1565 s = splimp();
1566 /* Flush accumulated credit for all queues */
1567 for (i = 0; i <= x->fs.rq_size; i++)
1568 for (q = x->fs.rq[i]; q; q = q->next)
1569 q->numbytes = 0;
1570 splx(s);
1571 }
1572
1573 s = splimp();
1574 x->bandwidth = p->bandwidth ;
1575 x->numbytes = 0; /* just in case... */
1576 bcopy(p->if_name, x->if_name, sizeof(p->if_name) );
1577 x->ifp = NULL ; /* reset interface ptr */
1578 x->delay = p->delay ;
1579 set_fs_parms(&(x->fs), pfs);
1580
1581
1582 if ( x->fs.rq == NULL ) { /* a new pipe */
1583 r = alloc_hash(&(x->fs), pfs) ;
1584 if (r) {
1585 free(x, M_DUMMYNET);
1586 splx(s);
1587 return r ;
1588 }
1589 x->next = b ;
1590 if (a == NULL)
1591 all_pipes = x ;
1592 else
1593 a->next = x ;
1594 }
1595 splx(s);
1596 } else { /* config queue */
1597 struct dn_flow_set *x, *a, *b ;
1598
1599 /* locate flow_set */
1600 for (a=NULL, b=all_flow_sets ; b && b->fs_nr < pfs->fs_nr ;
1601 a = b , b = b->next) ;
1602
1603 if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */
1604 if (pfs->parent_nr == 0) /* need link to a pipe */
1605 return EINVAL ;
1606 x = malloc(sizeof(struct dn_flow_set), M_DUMMYNET, M_NOWAIT|M_ZERO);
1607 if (x == NULL) {
1608 printf("dummynet: no memory for new flow_set\n");
1609 return ENOSPC;
1610 }
1611 x->fs_nr = pfs->fs_nr;
1612 x->parent_nr = pfs->parent_nr;
1613 x->weight = pfs->weight ;
1614 if (x->weight == 0)
1615 x->weight = 1 ;
1616 else if (x->weight > 100)
1617 x->weight = 100 ;
1618 } else {
1619 /* Change parent pipe not allowed; must delete and recreate */
1620 if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr)
1621 return EINVAL ;
1622 x = b;
1623 }
1624 s = splimp();
1625 set_fs_parms(x, pfs);
1626
1627 if ( x->rq == NULL ) { /* a new flow_set */
1628 r = alloc_hash(x, pfs) ;
1629 if (r) {
1630 free(x, M_DUMMYNET);
1631 splx(s);
1632 return r ;
1633 }
1634 x->next = b;
1635 if (a == NULL)
1636 all_flow_sets = x;
1637 else
1638 a->next = x;
1639 }
1640 splx(s);
1641 }
1642 return 0 ;
1643 }
1644
1645 /*
1646 * Helper function to remove from a heap queues which are linked to
1647 * a flow_set about to be deleted.
1648 */
1649 static void
1650 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1651 {
1652 int i = 0, found = 0 ;
1653 for (; i < h->elements ;)
1654 if ( ((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1655 h->elements-- ;
1656 h->p[i] = h->p[h->elements] ;
1657 found++ ;
1658 } else
1659 i++ ;
1660 if (found)
1661 heapify(h);
1662 }
1663
1664 /*
1665 * helper function to remove a pipe from a heap (can be there at most once)
1666 */
1667 static void
1668 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1669 {
1670 if (h->elements > 0) {
1671 int i = 0 ;
1672 for (i=0; i < h->elements ; i++ ) {
1673 if (h->p[i].object == p) { /* found it */
1674 h->elements-- ;
1675 h->p[i] = h->p[h->elements] ;
1676 heapify(h);
1677 break ;
1678 }
1679 }
1680 }
1681 }
1682
1683 /*
1684 * drain all queues. Called in case of severe mbuf shortage.
1685 */
1686 void
1687 dummynet_drain()
1688 {
1689 struct dn_flow_set *fs;
1690 struct dn_pipe *p;
1691 struct dn_pkt *pkt;
1692
1693 heap_free(&ready_heap);
1694 heap_free(&wfq_ready_heap);
1695 heap_free(&extract_heap);
1696 /* remove all references to this pipe from flow_sets */
1697 for (fs = all_flow_sets; fs; fs= fs->next )
1698 purge_flow_set(fs, 0);
1699
1700 for (p = all_pipes; p; p= p->next ) {
1701 purge_flow_set(&(p->fs), 0);
1702 for (pkt = p->head ; pkt ; )
1703 DN_FREE_PKT(pkt) ;
1704 p->head = p->tail = NULL ;
1705 }
1706 }
1707
1708 /*
1709 * Fully delete a pipe or a queue, cleaning up associated info.
1710 */
1711 static int
1712 delete_pipe(struct dn_pipe *p)
1713 {
1714 int s ;
1715
1716 if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
1717 return EINVAL ;
1718 if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
1719 return EINVAL ;
1720 if (p->pipe_nr != 0) { /* this is an old-style pipe */
1721 struct dn_pipe *a, *b;
1722 struct dn_flow_set *fs;
1723
1724 /* locate pipe */
1725 for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ;
1726 a = b , b = b->next) ;
1727 if (b == NULL || (b->pipe_nr != p->pipe_nr) )
1728 return EINVAL ; /* not found */
1729
1730 s = splimp() ;
1731
1732 /* unlink from list of pipes */
1733 if (a == NULL)
1734 all_pipes = b->next ;
1735 else
1736 a->next = b->next ;
1737 /* remove references to this pipe from the ip_fw rules. */
1738 flush_pipe_ptrs(&(b->fs));
1739
1740 /* remove all references to this pipe from flow_sets */
1741 for (fs = all_flow_sets; fs; fs= fs->next )
1742 if (fs->pipe == b) {
1743 printf("dummynet: ++ ref to pipe %d from fs %d\n",
1744 p->pipe_nr, fs->fs_nr);
1745 fs->pipe = NULL ;
1746 purge_flow_set(fs, 0);
1747 }
1748 fs_remove_from_heap(&ready_heap, &(b->fs));
1749 purge_pipe(b); /* remove all data associated to this pipe */
1750 /* remove reference to here from extract_heap and wfq_ready_heap */
1751 pipe_remove_from_heap(&extract_heap, b);
1752 pipe_remove_from_heap(&wfq_ready_heap, b);
1753 splx(s);
1754 free(b, M_DUMMYNET);
1755 } else { /* this is a WF2Q queue (dn_flow_set) */
1756 struct dn_flow_set *a, *b;
1757
1758 /* locate set */
1759 for (a = NULL, b = all_flow_sets ; b && b->fs_nr < p->fs.fs_nr ;
1760 a = b , b = b->next) ;
1761 if (b == NULL || (b->fs_nr != p->fs.fs_nr) )
1762 return EINVAL ; /* not found */
1763
1764 s = splimp() ;
1765 if (a == NULL)
1766 all_flow_sets = b->next ;
1767 else
1768 a->next = b->next ;
1769 /* remove references to this flow_set from the ip_fw rules. */
1770 flush_pipe_ptrs(b);
1771
1772 if (b->pipe != NULL) {
1773 /* Update total weight on parent pipe and cleanup parent heaps */
1774 b->pipe->sum -= b->weight * b->backlogged ;
1775 fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
1776 fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
1777 #if 1 /* XXX should i remove from idle_heap as well ? */
1778 fs_remove_from_heap(&(b->pipe->idle_heap), b);
1779 #endif
1780 }
1781 purge_flow_set(b, 1);
1782 splx(s);
1783 }
1784 return 0 ;
1785 }
1786
1787 /*
1788 * helper function used to copy data from kernel in DUMMYNET_GET
1789 */
1790 static char *
1791 dn_copy_set(struct dn_flow_set *set, char *bp)
1792 {
1793 int i, copied = 0 ;
1794 struct dn_flow_queue *q, *qp = (struct dn_flow_queue *)bp;
1795
1796 for (i = 0 ; i <= set->rq_size ; i++)
1797 for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
1798 if (q->hash_slot != i)
1799 printf("dummynet: ++ at %d: wrong slot (have %d, "
1800 "should be %d)\n", copied, q->hash_slot, i);
1801 if (q->fs != set)
1802 printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n",
1803 i, q->fs, set);
1804 copied++ ;
1805 bcopy(q, qp, sizeof( *q ) );
1806 /* cleanup pointers */
1807 qp->next = NULL ;
1808 qp->head = qp->tail = NULL ;
1809 qp->fs = NULL ;
1810 }
1811 if (copied != set->rq_elements)
1812 printf("dummynet: ++ wrong count, have %d should be %d\n",
1813 copied, set->rq_elements);
1814 return (char *)qp ;
1815 }
1816
1817 static int
1818 dummynet_get(struct sockopt *sopt)
1819 {
1820 char *buf, *bp ; /* bp is the "copy-pointer" */
1821 size_t size ;
1822 struct dn_flow_set *set ;
1823 struct dn_pipe *p ;
1824 int s, error=0 ;
1825
1826 s = splimp();
1827 /*
1828 * compute size of data structures: list of pipes and flow_sets.
1829 */
1830 for (p = all_pipes, size = 0 ; p ; p = p->next )
1831 size += sizeof( *p ) +
1832 p->fs.rq_elements * sizeof(struct dn_flow_queue);
1833 for (set = all_flow_sets ; set ; set = set->next )
1834 size += sizeof ( *set ) +
1835 set->rq_elements * sizeof(struct dn_flow_queue);
1836 buf = malloc(size, M_TEMP, M_NOWAIT);
1837 if (buf == 0) {
1838 splx(s);
1839 return ENOBUFS ;
1840 }
1841 for (p = all_pipes, bp = buf ; p ; p = p->next ) {
1842 struct dn_pipe *pipe_bp = (struct dn_pipe *)bp ;
1843
1844 /*
1845 * copy pipe descriptor into *bp, convert delay back to ms,
1846 * then copy the flow_set descriptor(s) one at a time.
1847 * After each flow_set, copy the queue descriptor it owns.
1848 */
1849 bcopy(p, bp, sizeof( *p ) );
1850 pipe_bp->delay = (pipe_bp->delay * 1000) / hz ;
1851 /*
1852 * XXX the following is a hack based on ->next being the
1853 * first field in dn_pipe and dn_flow_set. The correct
1854 * solution would be to move the dn_flow_set to the beginning
1855 * of struct dn_pipe.
1856 */
1857 pipe_bp->next = (struct dn_pipe *)DN_IS_PIPE ;
1858 /* clean pointers */
1859 pipe_bp->head = pipe_bp->tail = NULL ;
1860 pipe_bp->fs.next = NULL ;
1861 pipe_bp->fs.pipe = NULL ;
1862 pipe_bp->fs.rq = NULL ;
1863
1864 bp += sizeof( *p ) ;
1865 bp = dn_copy_set( &(p->fs), bp );
1866 }
1867 for (set = all_flow_sets ; set ; set = set->next ) {
1868 struct dn_flow_set *fs_bp = (struct dn_flow_set *)bp ;
1869 bcopy(set, bp, sizeof( *set ) );
1870 /* XXX same hack as above */
1871 fs_bp->next = (struct dn_flow_set *)DN_IS_QUEUE ;
1872 fs_bp->pipe = NULL ;
1873 fs_bp->rq = NULL ;
1874 bp += sizeof( *set ) ;
1875 bp = dn_copy_set( set, bp );
1876 }
1877 splx(s);
1878 error = sooptcopyout(sopt, buf, size);
1879 free(buf, M_TEMP);
1880 return error ;
1881 }
1882
1883 /*
1884 * Handler for the various dummynet socket options (get, flush, config, del)
1885 */
1886 static int
1887 ip_dn_ctl(struct sockopt *sopt)
1888 {
1889 int error = 0 ;
1890 struct dn_pipe *p, tmp_pipe;
1891
1892 /* Disallow sets in really-really secure mode. */
1893 if (sopt->sopt_dir == SOPT_SET) {
1894 #if __FreeBSD_version >= 500034
1895 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
1896 if (error)
1897 return (error);
1898 #else
1899 if (securelevel >= 3)
1900 return (EPERM);
1901 #endif
1902 }
1903
1904 switch (sopt->sopt_name) {
1905 default :
1906 printf("dummynet: -- unknown option %d", sopt->sopt_name);
1907 return EINVAL ;
1908
1909 case IP_DUMMYNET_GET :
1910 error = dummynet_get(sopt);
1911 break ;
1912
1913 case IP_DUMMYNET_FLUSH :
1914 dummynet_flush() ;
1915 break ;
1916
1917 case IP_DUMMYNET_CONFIGURE :
1918 p = &tmp_pipe ;
1919 error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
1920 if (error)
1921 break ;
1922 error = config_pipe(p);
1923 break ;
1924
1925 case IP_DUMMYNET_DEL : /* remove a pipe or queue */
1926 p = &tmp_pipe ;
1927 error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
1928 if (error)
1929 break ;
1930
1931 error = delete_pipe(p);
1932 break ;
1933 }
1934 return error ;
1935 }
1936
1937 static void
1938 ip_dn_init(void)
1939 {
1940 printf("DUMMYNET initialized (011031)\n");
1941 all_pipes = NULL ;
1942 all_flow_sets = NULL ;
1943 ready_heap.size = ready_heap.elements = 0 ;
1944 ready_heap.offset = 0 ;
1945
1946 wfq_ready_heap.size = wfq_ready_heap.elements = 0 ;
1947 wfq_ready_heap.offset = 0 ;
1948
1949 extract_heap.size = extract_heap.elements = 0 ;
1950 extract_heap.offset = 0 ;
1951 ip_dn_ctl_ptr = ip_dn_ctl;
1952 ip_dn_io_ptr = dummynet_io;
1953 ip_dn_ruledel_ptr = dn_rule_delete;
1954 bzero(&dn_timeout, sizeof(struct callout_handle));
1955 dn_timeout = timeout(dummynet, NULL, 1);
1956 }
1957
1958 static int
1959 dummynet_modevent(module_t mod, int type, void *data)
1960 {
1961 int s;
1962 switch (type) {
1963 case MOD_LOAD:
1964 s = splimp();
1965 if (DUMMYNET_LOADED) {
1966 splx(s);
1967 printf("DUMMYNET already loaded\n");
1968 return EEXIST ;
1969 }
1970 ip_dn_init();
1971 splx(s);
1972 break;
1973
1974 case MOD_UNLOAD:
1975 #if !defined(KLD_MODULE)
1976 printf("dummynet statically compiled, cannot unload\n");
1977 return EINVAL ;
1978 #else
1979 s = splimp();
1980 untimeout(dummynet, NULL, dn_timeout);
1981 dummynet_flush();
1982 ip_dn_ctl_ptr = NULL;
1983 ip_dn_io_ptr = NULL;
1984 ip_dn_ruledel_ptr = NULL;
1985 splx(s);
1986 #endif
1987 break ;
1988 default:
1989 break ;
1990 }
1991 return 0 ;
1992 }
1993
1994 static moduledata_t dummynet_mod = {
1995 "dummynet",
1996 dummynet_modevent,
1997 NULL
1998 };
1999 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
2000 MODULE_DEPEND(dummynet, ipfw, 1, 1, 1);
2001 MODULE_VERSION(dummynet, 1);