1 /*-
2 * Copyright (c) 2004-2010 University of Zagreb
3 * Copyright (c) 2007-2008 FreeBSD Foundation
4 *
5 * This software was developed by the University of Zagreb and the
6 * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
7 * FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: releng/8.3/sys/netgraph/ng_pipe.c 222347 2011-05-27 08:43:59Z zec $
31 */
32
33 /*
34 * This node permits simple traffic shaping by emulating bandwidth
35 * and delay, as well as random packet losses.
36 * The node has two hooks, upper and lower. Traffic flowing from upper to
37 * lower hook is referenced as downstream, and vice versa. Parameters for
38 * both directions can be set separately, except for delay.
39 */
40
41
42 #include <sys/param.h>
43 #include <sys/errno.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/time.h>
49
50 #include <vm/uma.h>
51
52 #include <net/vnet.h>
53
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/ip.h>
57
58 #include <netgraph/ng_message.h>
59 #include <netgraph/netgraph.h>
60 #include <netgraph/ng_parse.h>
61 #include <netgraph/ng_pipe.h>
62
63 static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");
64
/* Packet header struct - one per frame while it sits inside the node */
struct ngp_hdr {
	TAILQ_ENTRY(ngp_hdr)	ngp_link;	/* next pkt in queue */
	struct timeval		when;		/* this packet's due time */
	struct mbuf		*m;		/* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/*
 * FIFO queue struct - one per traffic flow.  With WFQ or DRR enabled
 * each flow (as classified by ip_hash()) gets its own queue; in plain
 * FIFO mode a single queue with hash 0 holds all traffic.
 */
struct ngp_fifo {
	TAILQ_ENTRY(ngp_fifo)	fifo_le;	/* list of active queues only */
	struct p_head		packet_head;	/* FIFO queue head */
	u_int32_t		hash;		/* flow signature */
	struct timeval		vtime;		/* virtual time, for WFQ */
	u_int32_t		rr_deficit;	/* for DRR */
	u_int32_t		packets;	/* # of packets in this queue */
};
82
/*
 * Per hook info.  Each of the two hooks (upper/lower) carries its own
 * configuration, counters, inbound (bandwidth) and outbound (delay)
 * queues, plus an optional BER table mapping frame size to the
 * fixed-point probability that the frame survives uncorrupted.
 */
struct hookinfo {
	hook_p			hook;
	int			noqueue;	/* bypass any processing */
	TAILQ_HEAD(, ngp_fifo)	fifo_head;	/* FIFO queues */
	TAILQ_HEAD(, ngp_hdr)	qout_head;	/* delay queue head */
	struct timeval		qin_utime;	/* service deadline of the
						   inbound queue */
	struct ng_pipe_hookcfg	cfg;		/* active configuration */
	struct ng_pipe_hookrun	run;		/* current queue occupancy */
	struct ng_pipe_hookstat	stats;		/* forwarding/drop counters */
	uint64_t		*ber_p;		/* loss_p(BER,psize) map */
};
95
/*
 * Per node info.  delay, overhead and header_offset are shared by both
 * directions; all direction-specific state lives in lower/upper.
 */
struct node_priv {
	u_int64_t		delay;		/* propagation delay (us) */
	u_int32_t		overhead;	/* per-frame framing bytes */
	u_int32_t		header_offset;	/* IP header offset, for
						   ip_hash() classification */
	struct hookinfo		lower;
	struct hookinfo		upper;
	struct callout		timer;		/* dequeueing tick */
	int			timer_scheduled;
};
typedef struct node_priv *priv_p;
107
/*
 * Macro for calculating the virtual time for packet dequeueing in WFQ.
 * When WFQ and a bandwidth limit are both configured, the queue ngp_f
 * is stamped with a virtual finish time proportional to the packet
 * length (plus framing overhead) and the number of active flows, and is
 * inserted into the per-hook list sorted by that time; otherwise it is
 * simply appended to the list tail.
 *
 * NOTE: expands in-place and relies on the local variables hinfo, priv,
 * now, ngp_f and ngp_f1 being in scope at the expansion site.
 */
#define FIFO_VTIME_SORT(plen)						\
	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
		    + priv->overhead ) * hinfo->run.fifo_queues *	\
		    8000000 / hinfo->cfg.bandwidth;			\
		ngp_f->vtime.tv_sec = now->tv_sec +			\
		    ngp_f->vtime.tv_usec / 1000000;			\
		ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;	\
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)	\
			if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec || \
			    (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec && \
			    ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec)) \
				break;					\
		if (ngp_f1 == NULL)					\
			TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le); \
		else							\
			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
	} else								\
		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);	\

129
130 static void parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
131 struct hookinfo *, priv_p);
132 static void pipe_dequeue(struct hookinfo *, struct timeval *);
133 static void ngp_callout(node_p, hook_p, void *, int);
134 static int ngp_modevent(module_t, int, void *);
135
136 /* zone for storing ngp_hdr-s */
137 static uma_zone_t ngp_zone;
138
139 /* Netgraph methods */
140 static ng_constructor_t ngp_constructor;
141 static ng_rcvmsg_t ngp_rcvmsg;
142 static ng_shutdown_t ngp_shutdown;
143 static ng_newhook_t ngp_newhook;
144 static ng_rcvdata_t ngp_rcvdata;
145 static ng_disconnect_t ngp_disconnect;
146
/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
	ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
	NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
	&ng_parse_struct_type,
	&ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
	ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
	ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
	&ng_parse_struct_type,
	&ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
	ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
	ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_cfg_type_fields
};

/*
 * List of control commands and how to convert their arguments to/from
 * ASCII, using the parse types defined above.
 */
static const struct ng_cmdlist ngp_cmds[] = {
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_STATS,
		.name = 	"getstats",
		.respType =	 &ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_CLR_STATS,
		.name =		"clrstats"
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GETCLR_STATS,
		.name =		"getclrstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_RUN,
		.name =		"getrun",
		.respType =	&ng_pipe_run_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_CFG,
		.name =		"getcfg",
		.respType =	&ng_pipe_cfg_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_SET_CFG,
		.name =		"setcfg",
		.mesgType =	&ng_pipe_cfg_type,
	},
	{ 0 }
};

/* Netgraph type descriptor - the method table for this node type */
static struct ng_type ng_pipe_typestruct = {
	.version =	NG_ABI_VERSION,
	.name =		NG_PIPE_NODE_TYPE,
	.mod_event =	ngp_modevent,
	.constructor =	ngp_constructor,
	.shutdown =	ngp_shutdown,
	.rcvmsg =	ngp_rcvmsg,
	.newhook =	ngp_newhook,
	.rcvdata =	ngp_rcvdata,
	.disconnect =	ngp_disconnect,
	.cmdlist =	ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);
249
250 /* Node constructor */
251 static int
252 ngp_constructor(node_p node)
253 {
254 priv_p priv;
255
256 priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_NOWAIT);
257 if (priv == NULL)
258 return (ENOMEM);
259 NG_NODE_SET_PRIVATE(node, priv);
260
261 /* Mark node as single-threaded */
262 NG_NODE_FORCE_WRITER(node);
263
264 ng_callout_init(&priv->timer);
265
266 return (0);
267 }
268
269 /* Add a hook */
270 static int
271 ngp_newhook(node_p node, hook_p hook, const char *name)
272 {
273 const priv_p priv = NG_NODE_PRIVATE(node);
274 struct hookinfo *hinfo;
275
276 if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
277 bzero(&priv->upper, sizeof(priv->upper));
278 priv->upper.hook = hook;
279 NG_HOOK_SET_PRIVATE(hook, &priv->upper);
280 } else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
281 bzero(&priv->lower, sizeof(priv->lower));
282 priv->lower.hook = hook;
283 NG_HOOK_SET_PRIVATE(hook, &priv->lower);
284 } else
285 return (EINVAL);
286
287 /* Load non-zero initial cfg values */
288 hinfo = NG_HOOK_PRIVATE(hook);
289 hinfo->cfg.qin_size_limit = 50;
290 hinfo->cfg.fifo = 1;
291 hinfo->cfg.droptail = 1;
292 TAILQ_INIT(&hinfo->fifo_head);
293 TAILQ_INIT(&hinfo->qout_head);
294 return (0);
295 }
296
/*
 * Receive a control message.
 *
 * Handles get/clear statistics, get running state, and get/set of the
 * node configuration.  On NGM_PIPE_SET_CFG, if the BER==1 ("dead link"
 * emulation) state changes, NGM_LINK_IS_UP/DOWN flow-control messages
 * are emitted on both hooks.
 */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *resp = NULL;
	struct ng_mesg *msg, *flow_msg;
	struct ng_pipe_stats *stats;
	struct ng_pipe_run *run;
	struct ng_pipe_cfg *cfg;
	int error = 0;
	int prev_down, now_down, cmd;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_PIPE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_PIPE_GET_STATS:
		case NGM_PIPE_CLR_STATS:
		case NGM_PIPE_GETCLR_STATS:
			/* GET variants return a snapshot of the counters */
			if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
				NG_MKRESPONSE(resp, msg,
				    sizeof(*stats), M_NOWAIT);
				if (resp == NULL) {
					error = ENOMEM;
					break;
				}
				stats = (struct ng_pipe_stats *) resp->data;
				/*
				 * The upper hook carries the downstream
				 * direction and vice versa.
				 */
				bcopy(&priv->upper.stats, &stats->downstream,
				    sizeof(stats->downstream));
				bcopy(&priv->lower.stats, &stats->upstream,
				    sizeof(stats->upstream));
			}
			/* CLR variants zero the counters afterwards */
			if (msg->header.cmd != NGM_PIPE_GET_STATS) {
				bzero(&priv->upper.stats,
				    sizeof(priv->upper.stats));
				bzero(&priv->lower.stats,
				    sizeof(priv->lower.stats));
			}
			break;
		case NGM_PIPE_GET_RUN:
			/* Report current queue occupancy for both hooks */
			NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			run = (struct ng_pipe_run *) resp->data;
			bcopy(&priv->upper.run, &run->downstream,
			    sizeof(run->downstream));
			bcopy(&priv->lower.run, &run->upstream,
			    sizeof(run->upstream));
			break;
		case NGM_PIPE_GET_CFG:
			NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			cfg = (struct ng_pipe_cfg *) resp->data;
			bcopy(&priv->upper.cfg, &cfg->downstream,
			    sizeof(cfg->downstream));
			bcopy(&priv->lower.cfg, &cfg->upstream,
			    sizeof(cfg->upstream));
			cfg->delay = priv->delay;
			cfg->overhead = priv->overhead;
			cfg->header_offset = priv->header_offset;
			/*
			 * When both directions share the same bandwidth,
			 * report it in the common field only.
			 */
			if (cfg->upstream.bandwidth ==
			    cfg->downstream.bandwidth) {
				cfg->bandwidth = cfg->upstream.bandwidth;
				cfg->upstream.bandwidth = 0;
				cfg->downstream.bandwidth = 0;
			} else
				cfg->bandwidth = 0;
			break;
		case NGM_PIPE_SET_CFG:
			cfg = (struct ng_pipe_cfg *) msg->data;
			if (msg->header.arglen != sizeof(*cfg)) {
				error = EINVAL;
				break;
			}

			/* -1 resets to default; 0 keeps the current value */
			if (cfg->delay == -1)
				priv->delay = 0;
			else if (cfg->delay > 0 && cfg->delay < 10000000)
				priv->delay = cfg->delay;

			/* Common bandwidth applies to both directions */
			if (cfg->bandwidth == -1) {
				priv->upper.cfg.bandwidth = 0;
				priv->lower.cfg.bandwidth = 0;
				priv->overhead = 0;
			} else if (cfg->bandwidth >= 100 &&
			    cfg->bandwidth <= 1000000000) {
				priv->upper.cfg.bandwidth = cfg->bandwidth;
				priv->lower.cfg.bandwidth = cfg->bandwidth;
				/* Guess framing overhead from link speed */
				if (cfg->bandwidth >= 10000000)
					priv->overhead = 8+4+12; /* Ethernet */
				else
					priv->overhead = 10; /* HDLC */
			}

			if (cfg->overhead == -1)
				priv->overhead = 0;
			else if (cfg->overhead > 0 &&
			    cfg->overhead < MAX_OHSIZE)
				priv->overhead = cfg->overhead;

			if (cfg->header_offset == -1)
				priv->header_offset = 0;
			else if (cfg->header_offset > 0 &&
			    cfg->header_offset < 64)
				priv->header_offset = cfg->header_offset;

			/* BER == 1 emulates a dead link in either direction */
			prev_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;
			parse_cfg(&priv->upper.cfg, &cfg->downstream,
			    &priv->upper, priv);
			parse_cfg(&priv->lower.cfg, &cfg->upstream,
			    &priv->lower, priv);
			now_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;

			/* Notify both peers if the link state flipped */
			if (prev_down != now_down) {
				if (now_down)
					cmd = NGM_LINK_IS_DOWN;
				else
					cmd = NGM_LINK_IS_UP;

				if (priv->lower.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->lower.hook,
						    0);
				}
				if (priv->upper.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->upper.hook,
						    0);
				}
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);

	return (error);
}
456
457 static void
458 parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
459 struct hookinfo *hinfo, priv_p priv)
460 {
461
462 if (new->ber == -1) {
463 current->ber = 0;
464 if (hinfo->ber_p) {
465 free(hinfo->ber_p, M_NG_PIPE);
466 hinfo->ber_p = NULL;
467 }
468 } else if (new->ber >= 1 && new->ber <= 1000000000000) {
469 static const uint64_t one = 0x1000000000000; /* = 2^48 */
470 uint64_t p0, p;
471 uint32_t fsize, i;
472
473 if (hinfo->ber_p == NULL)
474 hinfo->ber_p =
475 malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
476 M_NG_PIPE, M_NOWAIT);
477 current->ber = new->ber;
478
479 /*
480 * For given BER and each frame size N (in bytes) calculate
481 * the probability P_OK that the frame is clean:
482 *
483 * P_OK(BER,N) = (1 - 1/BER)^(N*8)
484 *
485 * We use a 64-bit fixed-point format with decimal point
486 * positioned between bits 47 and 48.
487 */
488 p0 = one - one / new->ber;
489 p = one;
490 for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
491 hinfo->ber_p[fsize] = p;
492 for (i = 0; i < 8; i++)
493 p = (p * (p0 & 0xffff) >> 48) +
494 (p * ((p0 >> 16) & 0xffff) >> 32) +
495 (p * (p0 >> 32) >> 16);
496 }
497 }
498
499 if (new->qin_size_limit == -1)
500 current->qin_size_limit = 0;
501 else if (new->qin_size_limit >= 5)
502 current->qin_size_limit = new->qin_size_limit;
503
504 if (new->qout_size_limit == -1)
505 current->qout_size_limit = 0;
506 else if (new->qout_size_limit >= 5)
507 current->qout_size_limit = new->qout_size_limit;
508
509 if (new->duplicate == -1)
510 current->duplicate = 0;
511 else if (new->duplicate > 0 && new->duplicate <= 50)
512 current->duplicate = new->duplicate;
513
514 if (new->fifo) {
515 current->fifo = 1;
516 current->wfq = 0;
517 current->drr = 0;
518 }
519
520 if (new->wfq) {
521 current->fifo = 0;
522 current->wfq = 1;
523 current->drr = 0;
524 }
525
526 if (new->drr) {
527 current->fifo = 0;
528 current->wfq = 0;
529 /* DRR quantum */
530 if (new->drr >= 32)
531 current->drr = new->drr;
532 else
533 current->drr = 2048; /* default quantum */
534 }
535
536 if (new->droptail) {
537 current->droptail = 1;
538 current->drophead = 0;
539 }
540
541 if (new->drophead) {
542 current->droptail = 0;
543 current->drophead = 1;
544 }
545
546 if (new->bandwidth == -1) {
547 current->bandwidth = 0;
548 current->fifo = 1;
549 current->wfq = 0;
550 current->drr = 0;
551 } else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
552 current->bandwidth = new->bandwidth;
553
554 if (current->bandwidth | priv->delay |
555 current->duplicate | current->ber)
556 hinfo->noqueue = 0;
557 else
558 hinfo->noqueue = 1;
559 }
560
561 /*
562 * Compute a hash signature for a packet. This function suffers from the
563 * NIH sindrome, so probably it would be wise to look around what other
564 * folks have found out to be a good and efficient IP hash function...
565 */
566 static int
567 ip_hash(struct mbuf *m, int offset)
568 {
569 u_int64_t i;
570 struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);
571
572 if (m->m_len < sizeof(struct ip) + offset ||
573 ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
574 return 0;
575
576 i = ((u_int64_t) ip->ip_src.s_addr ^
577 ((u_int64_t) ip->ip_src.s_addr << 13) ^
578 ((u_int64_t) ip->ip_dst.s_addr << 7) ^
579 ((u_int64_t) ip->ip_dst.s_addr << 19));
580 return (i ^ (i >> 32));
581 }
582
583 /*
584 * Receive data on a hook - both in upstream and downstream direction.
585 * We put the frame on the inbound queue, and try to initiate dequeuing
586 * sequence immediately. If inbound queue is full, discard one frame
587 * depending on dropping policy (from the head or from the tail of the
588 * queue).
589 */
590 static int
591 ngp_rcvdata(hook_p hook, item_p item)
592 {
593 struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
594 const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
595 struct timeval uuptime;
596 struct timeval *now = &uuptime;
597 struct ngp_fifo *ngp_f = NULL, *ngp_f1;
598 struct ngp_hdr *ngp_h = NULL;
599 struct mbuf *m;
600 int hash, plen;
601 int error = 0;
602
603 /*
604 * Shortcut from inbound to outbound hook when neither of
605 * bandwidth, delay, BER or duplication probability is
606 * configured, nor we have queued frames to drain.
607 */
608 if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
609 hinfo->noqueue) {
610 struct hookinfo *dest;
611 if (hinfo == &priv->lower)
612 dest = &priv->upper;
613 else
614 dest = &priv->lower;
615
616 /* Send the frame. */
617 plen = NGI_M(item)->m_pkthdr.len;
618 NG_FWD_ITEM_HOOK(error, item, dest->hook);
619
620 /* Update stats. */
621 if (error) {
622 hinfo->stats.out_disc_frames++;
623 hinfo->stats.out_disc_octets += plen;
624 } else {
625 hinfo->stats.fwd_frames++;
626 hinfo->stats.fwd_octets += plen;
627 }
628
629 return (error);
630 }
631
632 microuptime(now);
633
634 /*
635 * If this was an empty queue, update service deadline time.
636 */
637 if (hinfo->run.qin_frames == 0) {
638 struct timeval *when = &hinfo->qin_utime;
639 if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
640 && when->tv_usec < now->tv_usec)) {
641 when->tv_sec = now->tv_sec;
642 when->tv_usec = now->tv_usec;
643 }
644 }
645
646 /* Populate the packet header */
647 ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
648 KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
649 NGI_GET_M(item, m);
650 KASSERT(m != NULL, ("NGI_GET_M failed"));
651 ngp_h->m = m;
652 NG_FREE_ITEM(item);
653
654 if (hinfo->cfg.fifo)
655 hash = 0; /* all packets go into a single FIFO queue */
656 else
657 hash = ip_hash(m, priv->header_offset);
658
659 /* Find the appropriate FIFO queue for the packet and enqueue it*/
660 TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
661 if (hash == ngp_f->hash)
662 break;
663 if (ngp_f == NULL) {
664 ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
665 KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (2)"));
666 TAILQ_INIT(&ngp_f->packet_head);
667 ngp_f->hash = hash;
668 ngp_f->packets = 1;
669 ngp_f->rr_deficit = hinfo->cfg.drr; /* DRR quantum */
670 hinfo->run.fifo_queues++;
671 TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
672 FIFO_VTIME_SORT(m->m_pkthdr.len);
673 } else {
674 TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
675 ngp_f->packets++;
676 }
677 hinfo->run.qin_frames++;
678 hinfo->run.qin_octets += m->m_pkthdr.len;
679
680 /* Discard a frame if inbound queue limit has been reached */
681 if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
682 struct mbuf *m1;
683 int longest = 0;
684
685 /* Find the longest queue */
686 TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
687 if (ngp_f1->packets > longest) {
688 longest = ngp_f1->packets;
689 ngp_f = ngp_f1;
690 }
691
692 /* Drop a frame from the queue head/tail, depending on cfg */
693 if (hinfo->cfg.drophead)
694 ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
695 else
696 ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
697 TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
698 m1 = ngp_h->m;
699 uma_zfree(ngp_zone, ngp_h);
700 hinfo->run.qin_octets -= m1->m_pkthdr.len;
701 hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
702 m_freem(m1);
703 if (--(ngp_f->packets) == 0) {
704 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
705 uma_zfree(ngp_zone, ngp_f);
706 hinfo->run.fifo_queues--;
707 }
708 hinfo->run.qin_frames--;
709 hinfo->stats.in_disc_frames++;
710 } else if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
711 struct mbuf *m1;
712 int longest = 0;
713
714 /* Find the longest queue */
715 TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
716 if (ngp_f1->packets > longest) {
717 longest = ngp_f1->packets;
718 ngp_f = ngp_f1;
719 }
720
721 /* Drop a frame from the queue head/tail, depending on cfg */
722 if (hinfo->cfg.drophead)
723 ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
724 else
725 ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
726 TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
727 m1 = ngp_h->m;
728 uma_zfree(ngp_zone, ngp_h);
729 hinfo->run.qin_octets -= m1->m_pkthdr.len;
730 hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
731 m_freem(m1);
732 if (--(ngp_f->packets) == 0) {
733 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
734 uma_zfree(ngp_zone, ngp_f);
735 hinfo->run.fifo_queues--;
736 }
737 hinfo->run.qin_frames--;
738 hinfo->stats.in_disc_frames++;
739 }
740
741 /*
742 * Try to start the dequeuing process immediately.
743 */
744 pipe_dequeue(hinfo, now);
745
746 return (0);
747 }
748
749
750 /*
751 * Dequeueing sequence - we basically do the following:
752 * 1) Try to extract the frame from the inbound (bandwidth) queue;
753 * 2) In accordance to BER specified, discard the frame randomly;
754 * 3) If the frame survives BER, prepend it with delay info and move it
755 * to outbound (delay) queue;
756 * 4) Loop to 2) until bandwidth quota for this timeslice is reached, or
757 * inbound queue is flushed completely;
758 * 5) Dequeue frames from the outbound queue and send them downstream until
759 * outbound queue is flushed completely, or the next frame in the queue
760 * is not due to be dequeued yet
761 */
762 static void
763 pipe_dequeue(struct hookinfo *hinfo, struct timeval *now) {
764 static uint64_t rand, oldrand;
765 const node_p node = NG_HOOK_NODE(hinfo->hook);
766 const priv_p priv = NG_NODE_PRIVATE(node);
767 struct hookinfo *dest;
768 struct ngp_fifo *ngp_f, *ngp_f1;
769 struct ngp_hdr *ngp_h;
770 struct timeval *when;
771 struct mbuf *m;
772 int plen, error = 0;
773
774 /* Which one is the destination hook? */
775 if (hinfo == &priv->lower)
776 dest = &priv->upper;
777 else
778 dest = &priv->lower;
779
780 /* Bandwidth queue processing */
781 while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
782 when = &hinfo->qin_utime;
783 if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
784 && when->tv_usec > now->tv_usec))
785 break;
786
787 ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
788 m = ngp_h->m;
789
790 /* Deficit Round Robin (DRR) processing */
791 if (hinfo->cfg.drr) {
792 if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
793 ngp_f->rr_deficit -= m->m_pkthdr.len;
794 } else {
795 ngp_f->rr_deficit += hinfo->cfg.drr;
796 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
797 TAILQ_INSERT_TAIL(&hinfo->fifo_head,
798 ngp_f, fifo_le);
799 continue;
800 }
801 }
802
803 /*
804 * Either create a duplicate and pass it on, or dequeue
805 * the original packet...
806 */
807 if (hinfo->cfg.duplicate &&
808 random() % 100 <= hinfo->cfg.duplicate) {
809 ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
810 KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
811 m = m_dup(m, M_NOWAIT);
812 KASSERT(m != NULL, ("m_dup failed"));
813 ngp_h->m = m;
814 } else {
815 TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
816 hinfo->run.qin_frames--;
817 hinfo->run.qin_octets -= m->m_pkthdr.len;
818 ngp_f->packets--;
819 }
820
821 /* Calculate the serialization delay */
822 if (hinfo->cfg.bandwidth) {
823 hinfo->qin_utime.tv_usec +=
824 ((uint64_t) m->m_pkthdr.len + priv->overhead ) *
825 8000000 / hinfo->cfg.bandwidth;
826 hinfo->qin_utime.tv_sec +=
827 hinfo->qin_utime.tv_usec / 1000000;
828 hinfo->qin_utime.tv_usec =
829 hinfo->qin_utime.tv_usec % 1000000;
830 }
831 when = &ngp_h->when;
832 when->tv_sec = hinfo->qin_utime.tv_sec;
833 when->tv_usec = hinfo->qin_utime.tv_usec;
834
835 /* Sort / rearrange inbound queues */
836 if (ngp_f->packets) {
837 if (hinfo->cfg.wfq) {
838 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
839 FIFO_VTIME_SORT(TAILQ_FIRST(
840 &ngp_f->packet_head)->m->m_pkthdr.len)
841 }
842 } else {
843 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
844 uma_zfree(ngp_zone, ngp_f);
845 hinfo->run.fifo_queues--;
846 }
847
848 /* Randomly discard the frame, according to BER setting */
849 if (hinfo->cfg.ber) {
850 oldrand = rand;
851 rand = random();
852 if (((oldrand ^ rand) << 17) >=
853 hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
854 hinfo->stats.out_disc_frames++;
855 hinfo->stats.out_disc_octets += m->m_pkthdr.len;
856 uma_zfree(ngp_zone, ngp_h);
857 m_freem(m);
858 continue;
859 }
860 }
861
862 /* Discard frame if outbound queue size limit exceeded */
863 if (hinfo->cfg.qout_size_limit &&
864 hinfo->run.qout_frames>=hinfo->cfg.qout_size_limit) {
865 hinfo->stats.out_disc_frames++;
866 hinfo->stats.out_disc_octets += m->m_pkthdr.len;
867 uma_zfree(ngp_zone, ngp_h);
868 m_freem(m);
869 continue;
870 }
871
872 /* Calculate the propagation delay */
873 when->tv_usec += priv->delay;
874 when->tv_sec += when->tv_usec / 1000000;
875 when->tv_usec = when->tv_usec % 1000000;
876
877 /* Put the frame into the delay queue */
878 TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
879 hinfo->run.qout_frames++;
880 hinfo->run.qout_octets += m->m_pkthdr.len;
881 }
882
883 /* Delay queue processing */
884 while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
885 when = &ngp_h->when;
886 m = ngp_h->m;
887 if (when->tv_sec > now->tv_sec ||
888 (when->tv_sec == now->tv_sec &&
889 when->tv_usec > now->tv_usec))
890 break;
891
892 /* Update outbound queue stats */
893 plen = m->m_pkthdr.len;
894 hinfo->run.qout_frames--;
895 hinfo->run.qout_octets -= plen;
896
897 /* Dequeue the packet from qout */
898 TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
899 uma_zfree(ngp_zone, ngp_h);
900
901 NG_SEND_DATA(error, dest->hook, m, meta);
902 if (error) {
903 hinfo->stats.out_disc_frames++;
904 hinfo->stats.out_disc_octets += plen;
905 } else {
906 hinfo->stats.fwd_frames++;
907 hinfo->stats.fwd_octets += plen;
908 }
909 }
910
911 if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
912 !priv->timer_scheduled) {
913 ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
914 priv->timer_scheduled = 1;
915 }
916 }
917
918 /*
919 * This routine is called on every clock tick. We poll connected hooks
920 * for queued frames by calling pipe_dequeue().
921 */
922 static void
923 ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
924 {
925 const priv_p priv = NG_NODE_PRIVATE(node);
926 struct timeval now;
927
928 priv->timer_scheduled = 0;
929 microuptime(&now);
930 if (priv->upper.hook != NULL)
931 pipe_dequeue(&priv->upper, &now);
932 if (priv->lower.hook != NULL)
933 pipe_dequeue(&priv->lower, &now);
934 }
935
936 /*
937 * Shutdown processing
938 *
939 * This is tricky. If we have both a lower and upper hook, then we
940 * probably want to extricate ourselves and leave the two peers
941 * still linked to each other. Otherwise we should just shut down as
942 * a normal node would.
943 */
944 static int
945 ngp_shutdown(node_p node)
946 {
947 const priv_p priv = NG_NODE_PRIVATE(node);
948
949 if (priv->timer_scheduled)
950 ng_uncallout(&priv->timer, node);
951 if (priv->lower.hook && priv->upper.hook)
952 ng_bypass(priv->lower.hook, priv->upper.hook);
953 else {
954 if (priv->upper.hook != NULL)
955 ng_rmhook_self(priv->upper.hook);
956 if (priv->lower.hook != NULL)
957 ng_rmhook_self(priv->lower.hook);
958 }
959 NG_NODE_UNREF(node);
960 free(priv, M_NG_PIPE);
961 return (0);
962 }
963
964
965 /*
966 * Hook disconnection
967 */
968 static int
969 ngp_disconnect(hook_p hook)
970 {
971 struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
972 struct ngp_fifo *ngp_f;
973 struct ngp_hdr *ngp_h;
974
975 KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
976 hinfo->hook = NULL;
977
978 /* Flush all fifo queues associated with the hook */
979 while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
980 while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
981 TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
982 m_freem(ngp_h->m);
983 uma_zfree(ngp_zone, ngp_h);
984 }
985 TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
986 uma_zfree(ngp_zone, ngp_f);
987 }
988
989 /* Flush the delay queue */
990 while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
991 TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
992 m_freem(ngp_h->m);
993 uma_zfree(ngp_zone, ngp_h);
994 }
995
996 /* Release the packet loss probability table (BER) */
997 if (hinfo->ber_p)
998 free(hinfo->ber_p, M_NG_PIPE);
999
1000 return (0);
1001 }
1002
1003 static int
1004 ngp_modevent(module_t mod, int type, void *unused)
1005 {
1006 int error = 0;
1007
1008 switch (type) {
1009 case MOD_LOAD:
1010 ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
1011 sizeof (struct ngp_fifo)), NULL, NULL, NULL, NULL,
1012 UMA_ALIGN_PTR, 0);
1013 if (ngp_zone == NULL)
1014 panic("ng_pipe: couldn't allocate descriptor zone");
1015 break;
1016 case MOD_UNLOAD:
1017 uma_zdestroy(ngp_zone);
1018 break;
1019 default:
1020 error = EOPNOTSUPP;
1021 break;
1022 }
1023
1024 return (error);
1025 }
Cache object: 0deb44f48afea5f9c6cdc4d51dd314d3
|