FreeBSD/Linux Kernel Cross Reference
sys/net/if_lagg.c
/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.3/sys/net/if_lagg.c 183223 2008-09-21 06:01:03Z thompsa $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SLIST_HEAD(__trhead, lagg_softc) lagg_list;	/* list of laggs */
static struct mtx	lagg_list_mtx;
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
static void	lagg_lladdr(struct lagg_softc *, uint8_t *);
static void	lagg_capabilities(struct lagg_softc *);
static void	lagg_port_lladdr(struct lagg_port *, uint8_t *);
static void	lagg_port_setlladdr(void *, int);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
static int	lagg_port_checkstacking(struct lagg_softc *);
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_ether_setmulti(struct lagg_softc *);
static int	lagg_ether_cmdmulti(struct lagg_port *, int);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static void	lagg_start(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);
static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);

IFC_SIMPLE_DECLARE(lagg, 0);

/* Simple round robin */
static int	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_detach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_attach(struct lagg_softc *);
static int	lagg_fail_detach(struct lagg_softc *);
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static int	lagg_lb_attach(struct lagg_softc *);
static int	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* 802.3ad LACP */
static int	lagg_lacp_attach(struct lagg_softc *);
static int	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct {
	int			ti_proto;
	int			(*ti_attach)(struct lagg_softc *);
} lagg_protos[] = {
	{ LAGG_PROTO_ROUNDROBIN,	lagg_rr_attach },
	{ LAGG_PROTO_FAILOVER,		lagg_fail_attach },
	{ LAGG_PROTO_LOADBALANCE,	lagg_lb_attach },
	{ LAGG_PROTO_ETHERCHANNEL,	lagg_lb_attach },
	{ LAGG_PROTO_LACP,		lagg_lacp_attach },
	{ LAGG_PROTO_NONE,		NULL }
};
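
/*
 * Illustrative note (not part of the original source): the active protocol
 * is selected at runtime through the SIOCSLAGG ioctl, which lagg_ioctl()
 * below resolves against this table.  From userland this is typically
 * driven by ifconfig(8), e.g. "ifconfig lagg0 laggproto lacp".
 */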

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
		SLIST_INIT(&lagg_list);
		if_clone_attach(&lagg_cloner);
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		if_clone_detach(&lagg_cloner);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		mtx_destroy(&lagg_list_mtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
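
/*
 * Usage note: as a pseudo-device module this can be loaded at runtime with
 * kldload(8) ("kldload if_lagg") or at boot via if_lagg_load="YES" in
 * loader.conf(5); creating a lagg interface with ifconfig(8) will normally
 * auto-load it as well.
 */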

static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int i, error = 0;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	sc->sc_proto = LAGG_PROTO_NONE;
	for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
		if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
			sc->sc_proto = lagg_protos[i].ti_proto;
			if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
				if_free_type(ifp, IFT_ETHER);
				free(sc, M_DEVBUF);
				return (error);
			}
			break;
		}
	}
	LAGG_LOCK_INIT(sc);
	SLIST_INIT(&sc->sc_ports);
	TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_type = IFT_ETHER;
	ifp->if_softc = sc;
	ifp->if_start = lagg_start;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;

	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	/* Insert into the global list of laggs */
	mtx_lock(&lagg_list_mtx);
	SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	return (0);
}
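
/*
 * Example (illustrative): lagg_clone_create() runs when an interface is
 * cloned from userland, e.g. "ifconfig lagg0 create".  The new interface
 * starts with no ports, an all-zero MAC, and whatever protocol if_lagg.h
 * defines as LAGG_PROTO_DEFAULT (or none if the table lacks such an entry).
 */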

static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_WLOCK(sc);

	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	/* Shutdown and remove lagg ports */
	while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);
	/* Unhook the aggregation protocol */
	(*sc->sc_detach)(sc);

	LAGG_WUNLOCK(sc);

	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free_type(ifp, IFT_ETHER);

	mtx_lock(&lagg_list_mtx);
	SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
	LAGG_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}

static void
lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
{
	struct ifnet *ifp = sc->sc_ifp;

	if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	/* Let the protocol know the MAC has changed */
	if (sc->sc_lladdr != NULL)
		(*sc->sc_lladdr)(sc);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap = ~0, ena = ~0;

	LAGG_WLOCK_ASSERT(sc);

	/* Get capabilities from the lagg ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		ena &= lp->lp_ifp->if_capenable;
	}
	cap = (cap == ~0 ? 0 : cap);
	ena = (ena == ~0 ? 0 : ena);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

static void
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct lagg_llq *llq;
	int pending = 0;

	LAGG_WLOCK_ASSERT(sc);

	if (lp->lp_detaching ||
	    memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	/* Check to make sure it's not already queued to be changed */
	SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
		if (llq->llq_ifp == ifp) {
			pending = 1;
			break;
		}
	}

	if (!pending) {
		llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
		if (llq == NULL)	/* XXX what to do */
			return;
	}

	/* Update the lladdr even if pending; it may have changed */
	llq->llq_ifp = ifp;
	bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);

	if (!pending)
		SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);

	taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
}

/*
 * Set the interface MAC address from a taskqueue to avoid a LOR.
 */
static void
lagg_port_setlladdr(void *arg, int pending)
{
	struct lagg_softc *sc = (struct lagg_softc *)arg;
	struct lagg_llq *llq, *head;
	struct ifnet *ifp;
	int error;

	/* Grab a local reference of the queue and remove it from the softc */
	LAGG_WLOCK(sc);
	head = SLIST_FIRST(&sc->sc_llq_head);
	SLIST_FIRST(&sc->sc_llq_head) = NULL;
	LAGG_WUNLOCK(sc);

	/*
	 * Traverse the queue and set the lladdr on each ifp. It is safe to
	 * do this unlocked as we have the only reference to it.
	 */
	for (llq = head; llq != NULL; llq = head) {
		ifp = llq->llq_ifp;

		/* Set the link layer address */
		error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
		if (error)
			printf("%s: setlladdr failed on %s\n", __func__,
			    ifp->if_xname);

		head = SLIST_NEXT(llq, llq_entries);
		free(llq, M_DEVBUF);
	}
}
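
/*
 * Note on the pattern above: the llq list head is detached from the softc
 * while the lock is held and the list is then walked unlocked.  Any
 * lagg_port_lladdr() call that races with this task lands on a fresh list
 * and simply re-enqueues the task, so no request is lost.
 */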

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int error = 0;

	LAGG_WLOCK_ASSERT(sc);

	/* Limit the maximum number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* A new lagg port has to be in an idle state */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return (EBUSY);

	/* Check if the port has already been associated with a lagg */
	if (ifp->if_lagg != NULL)
		return (EBUSY);

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",
		    ifp->if_xname);
		return (EINVAL);
	}

	if ((lp = malloc(sizeof(struct lagg_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Check if port is a stacked lagg */
	mtx_lock(&lagg_list_mtx);
	SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			mtx_unlock(&lagg_list_mtx);
			free(lp, M_DEVBUF);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				mtx_unlock(&lagg_list_mtx);
				free(lp, M_DEVBUF);
				return (E2BIG);
			}
			*/
		}
	}
	mtx_unlock(&lagg_list_mtx);

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	lp->lp_ifp = ifp;
	lp->lp_softc = sc;

	/* Save port link layer address */
	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_primary = lp;
		lagg_lladdr(sc, IF_LLADDR(ifp));
	} else {
		/* Update link layer address for this port */
		lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
	}

	/* Insert into the list of ports */
	SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	/* Add multicast addresses and interface flags to this port */
	lagg_ether_cmdmulti(lp, 1);
	lagg_setflags(lp, 1);

	if (sc->sc_port_create != NULL)
		error = (*sc->sc_port_create)(lp);
	if (error) {
		/* Remove the port again, without calling sc_port_destroy */
		lagg_port_destroy(lp, 0);
		return (error);
	}

	return (error);
}
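
/*
 * Illustrative usage: ports are added via the SIOCSLAGGPORT ioctl, e.g.
 * "ifconfig lagg0 laggport em0".  Note that the first port fixes the lagg
 * MTU, so members using a non-default MTU must be configured before they
 * are added.
 */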

static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}

static int
lagg_port_destroy(struct lagg_port *lp, int runpd)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr;
	struct lagg_llq *llq;
	struct ifnet *ifp = lp->lp_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if (runpd && sc->sc_port_destroy != NULL)
		(*sc->sc_port_destroy)(lp);

	/*
	 * Remove multicast addresses and interface flags from this port and
	 * reset the MAC address; skip this if the interface is being
	 * detached.
	 */
	if (!lp->lp_detaching) {
		lagg_ether_cmdmulti(lp, 0);
		lagg_setflags(lp, 0);
		lagg_port_lladdr(lp, lp->lp_lladdr);
	}

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Finally, remove the port from the lagg */
	SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
		} else {
			bcopy(lp_ptr->lp_lladdr,
			    lladdr, ETHER_ADDR_LEN);
		}
		lagg_lladdr(sc, lladdr);
		sc->sc_primary = lp_ptr;

		/* Update link layer address for each port */
		SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			lagg_port_lladdr(lp_ptr, lladdr);
	}

	/* Remove any pending lladdr changes from the queue */
	if (lp->lp_detaching) {
		SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
			if (llq->llq_ifp == ifp) {
				SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
				    llq_entries);
				free(llq, M_DEVBUF);
				break;	/* Only appears once */
			}
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	free(lp, M_DEVBUF);

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_WLOCK(sc);
		lagg_capabilities(sc);
		LAGG_WUNLOCK(sc);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	/* lp may still be NULL here, e.g. when the if_type check failed */
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}

static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct rtentry *rt0)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct ether_header *eh;
	short type = 0;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		eh = (struct ether_header *)dst->sa_data;
		type = eh->ether_type;
		break;
	}

	/*
	 * Only allow ethernet types required to initiate or maintain the
	 * link; aggregated frames take a different path.
	 */
	switch (ntohs(type)) {
	case ETHERTYPE_PAE:	/* EAPOL PAE/802.1x */
		return ((*lp->lp_output)(ifp, m, dst, rt0));
	}

	/* Drop any other frames */
	m_freem(m);
	return (EBUSY);
}
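
/*
 * Any other direct transmit on a member port is refused above: aggregated
 * traffic must enter through the lagg interface itself so that the active
 * protocol can choose the egress port.
 */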

static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;

	sc = lp->lp_softc;

	LAGG_WLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_WUNLOCK(sc);
}

static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	if (sc->sc_portreq != NULL)
		(*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}

static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct lagg_port *lp;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	LAGG_WLOCK(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Update the port lladdrs */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_port_lladdr(lp, IF_LLADDR(ifp));

	if (sc->sc_init != NULL)
		(*sc->sc_init)(sc);

	LAGG_WUNLOCK(sc);
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (sc->sc_stop != NULL)
		(*sc->sc_stop)(sc);
}

static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_RLOCK(sc);
		count = 0;
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			count++;
		buflen = count * sizeof(struct lagg_reqport);
		LAGG_RUNLOCK(sc);

		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);

		LAGG_RLOCK(sc);
		ra->ra_proto = sc->sc_proto;
		if (sc->sc_req != NULL)
			(*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);

		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_RUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		if (sc->sc_proto != LAGG_PROTO_NONE) {
			LAGG_WLOCK(sc);
			error = sc->sc_detach(sc);
			/* Reset protocol and pointers */
			sc->sc_proto = LAGG_PROTO_NONE;
			sc->sc_detach = NULL;
			sc->sc_start = NULL;
			sc->sc_input = NULL;
			sc->sc_port_create = NULL;
			sc->sc_port_destroy = NULL;
			sc->sc_linkstate = NULL;
			sc->sc_init = NULL;
			sc->sc_stop = NULL;
			sc->sc_lladdr = NULL;
			sc->sc_req = NULL;
			sc->sc_portreq = NULL;
			LAGG_WUNLOCK(sc);
		}
		if (error != 0)
			break;
		for (int i = 0; i < (sizeof(lagg_protos) /
		    sizeof(lagg_protos[0])); i++) {
			if (lagg_protos[i].ti_proto == ra->ra_proto) {
				if (sc->sc_ifflags & IFF_DEBUG)
					printf("%s: using proto %u\n",
					    sc->sc_ifname,
					    lagg_protos[i].ti_proto);
				LAGG_WLOCK(sc);
				sc->sc_proto = lagg_protos[i].ti_proto;
				if (sc->sc_proto != LAGG_PROTO_NONE)
					error = lagg_protos[i].ti_attach(sc);
				LAGG_WUNLOCK(sc);
				return (error);
			}
		}
		error = EPROTONOSUPPORT;
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
		LAGG_WLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_WLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_WUNLOCK(sc);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_WLOCK(sc);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}
		LAGG_WUNLOCK(sc);

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			LAGG_WLOCK(sc);
			lagg_stop(sc);
			LAGG_WUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			(*ifp->if_init)(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_WLOCK(sc);
		error = lagg_ether_setmulti(sc);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
	case SIOCSIFMTU:
		/* Do not allow the MTU or caps to be directly changed */
		error = EINVAL;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
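
/*
 * Illustrative summary of the ioctl surface above: SIOCGLAGG and
 * SIOCGLAGGPORT are unprivileged queries, while SIOCSLAGG, SIOCSLAGGPORT
 * and SIOCSLAGGDELPORT require PRIV_NET_LAGG.  A typical setup from
 * ifconfig(8) would be:
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto failover laggport em0 laggport em1 \
 *	    192.168.1.1 netmask 255.255.255.0
 */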

static int
lagg_ether_setmulti(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* First, remove any existing filter entries. */
		lagg_ether_cmdmulti(lp, 0);
		/* Copy all addresses from the lagg interface to the port. */
		lagg_ether_cmdmulti(lp, 1);
	}
	return (0);
}

static int
lagg_ether_cmdmulti(struct lagg_port *lp, int set)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct sockaddr_dl sdl;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;
	sdl.sdl_index = ifp->if_index;

	if (set) {
		TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    LLADDR(&sdl), ETHER_ADDR_LEN);

			error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
			if (error)
				return (error);
			mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
			if (mc == NULL)
				return (ENOMEM);
			mc->mc_ifma = rifma;
			SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
		}
	} else {
		while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
			SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
			if_delmulti_ifma(mc->mc_ifma);
			free(mc, M_DEVBUF);
		}
	}
	return (0);
}

/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
	int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded port status differs from what we want it to
	 * be. If it does, flip it. We record the port status in lp_ifflags
	 * so that we won't clear a port flag that we haven't set. In fact,
	 * we don't clear or set port flags directly; we get or release
	 * references to them, which is why the recorded flags stay in
	 * accord with the actual port flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the lagg port:
 * if "status" is true, update the port flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

static void
lagg_start(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct mbuf *m;
	int error = 0;

	LAGG_RLOCK(sc);
	for (;; error = 0) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m);

		/* We need a Tx algorithm and at least one port */
		if (sc->sc_proto != LAGG_PROTO_NONE && sc->sc_count)
			error = (*sc->sc_start)(sc, m);
		else
			m_freem(m);

		if (error == 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
	LAGG_RUNLOCK(sc);
}

static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (lp->lp_flags & LAGG_PORT_DISABLED) ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		m_freem(m);
		return (NULL);
	}

	LAGG_RLOCK(sc);
	ETHER_BPF_MTAP(scifp, m);

	m = (*sc->sc_input)(sc, lp, m);

	if (m != NULL) {
		scifp->if_ipackets++;
		scifp->if_ibytes += m->m_pkthdr.len;

		if (scifp->if_flags & IFF_MONITOR) {
			m_freem(m);
			m = NULL;
		}
	}

	LAGG_RUNLOCK(sc);
	return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	LAGG_RLOCK(sc);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	LAGG_RUNLOCK(sc);
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;

	/* Our link is considered up if at least one of our ports is active */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	if_link_state_change(sc->sc_ifp, new_link);
}

static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_WLOCK(sc);
	lagg_linkstate(sc);
	if (sc->sc_linkstate != NULL)
		(*sc->sc_linkstate)(lp);
	LAGG_WUNLOCK(sc);
}

struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;
	// int new_link = LINK_STATE_DOWN;

	LAGG_RLOCK_ASSERT(sc);
	/*
	 * Search for a port that reports an active link state.
	 */

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			rval = lp_next;
			goto found;
		}
	}

found:
	if (rval != NULL) {
		/*
		 * The IEEE 802.1D standard assumes that a lagg with
		 * multiple ports is always full duplex. This is valid
		 * for load sharing laggs and if at least two links
		 * are active. Unfortunately, checking the latter would
		 * be too expensive at this point.
		 XXX
		if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
		    (sc->sc_count > 1))
			new_link = LINK_STATE_FULL_DUPLEX;
		else
			new_link = rval->lp_link_state;
		 */
	}

	return (rval);
}

static const void *
lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
{
	if (m->m_pkthdr.len < (off + len)) {
		return (NULL);
	} else if (m->m_len < (off + len)) {
		m_copydata(m, off, len, buf);
		return (buf);
	}
	return (mtod(m, char *) + off);
}
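
/*
 * lagg_hashmbuf() below folds the Ethernet addresses, any VLAN tag and,
 * where present, the IPv4/IPv6 addresses (plus the IPv6 flow label) into a
 * single 32-bit value by chaining hash32_buf() calls.  For example, a
 * TCP/IPv4 frame hashes ether_shost, ether_dhost, ip_src and ip_dst in
 * turn, so every packet of a given flow yields the same value and, in
 * lagg_lb_start(), the same egress port.
 */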

uint32_t
lagg_hashmbuf(struct mbuf *m, uint32_t key)
{
	uint16_t etype;
	uint32_t p = 0;
	int off;
	struct ether_header *eh;
	struct ether_vlan_header vlanbuf;
	const struct ether_vlan_header *vlan;
#ifdef INET
	const struct ip *ip;
	struct ip ipbuf;
#endif
#ifdef INET6
	const struct ip6_hdr *ip6;
	struct ip6_hdr ip6buf;
	uint32_t flow;
#endif

	off = sizeof(*eh);
	if (m->m_len < off)
		goto out;
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
	p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);

	/* Special handling for encapsulating VLAN frames */
	if (m->m_flags & M_VLANTAG) {
		p = hash32_buf(&m->m_pkthdr.ether_vtag,
		    sizeof(m->m_pkthdr.ether_vtag), p);
	} else if (etype == ETHERTYPE_VLAN) {
		vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
		if (vlan == NULL)
			goto out;

		p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
		etype = ntohs(vlan->evl_proto);
		off += sizeof(*vlan) - sizeof(*eh);
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
		if (ip == NULL)
			goto out;

		p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
		p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
		if (ip6 == NULL)
			goto out;

		p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
		p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
		flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
		p = hash32_buf(&flow, sizeof(flow), p);	/* IPv6 flow label */
		break;
#endif
	}
out:
	return (p);
}

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	int error = 0;

	IFQ_HANDOFF(ifp, m, error);
	if (error)
		ifp->if_oerrors++;
	return (error);
}

/*
 * Simple round robin aggregation
 */

static int
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_rr_detach;
	sc->sc_start = lagg_rr_start;
	sc->sc_input = lagg_rr_input;
	sc->sc_port_create = NULL;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
	sc->sc_seq = 0;

	return (0);
}

static int
lagg_rr_detach(struct lagg_softc *sc)
{
	return (0);
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p %= sc->sc_count;
	lp = SLIST_FIRST(&sc->sc_ports);
	while (p--)
		lp = SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
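
/*
 * Round robin spreads successive packets across all ports regardless of
 * flow, which maximises utilisation but can reorder packets belonging to
 * the same connection; the hash-based loadbalance and LACP modes below
 * avoid that by keeping each flow on a single port.
 */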

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * Active failover
 */

static int
lagg_fail_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_fail_detach;
	sc->sc_start = lagg_fail_start;
	sc->sc_input = lagg_fail_input;
	sc->sc_port_create = NULL;
	sc->sc_port_destroy = NULL;

	return (0);
}

static int
lagg_fail_detach(struct lagg_softc *sc)
{
	return (0);
}

static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}
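
/*
 * In failover mode frames are thus only accepted from the primary (or the
 * port standing in for it); traffic seen on any other member is dropped,
 * except in the odd all-links-down case handled above.
 */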

/*
 * Loadbalancing
 */

static int
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	sc->sc_detach = lagg_lb_detach;
	sc->sc_start = lagg_lb_start;
	sc->sc_input = lagg_lb_input;
	sc->sc_port_create = lagg_lb_port_create;
	sc->sc_port_destroy = lagg_lb_port_destroy;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;

	lb->lb_key = arc4random();
	sc->sc_psc = (caddr_t)lb;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);

	return (0);
}

static int
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_DEVBUF);
	return (0);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS)
			return (EINVAL);
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (0);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
}

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	p = lagg_hashmbuf(m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * 802.3ad LACP
 */

static int
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	sc->sc_detach = lagg_lacp_detach;
	sc->sc_port_create = lacp_port_create;
	sc->sc_port_destroy = lacp_port_destroy;
	sc->sc_linkstate = lacp_linkstate;
	sc->sc_start = lagg_lacp_start;
	sc->sc_input = lagg_lacp_input;
	sc->sc_init = lacp_init;
	sc->sc_stop = lacp_stop;
	sc->sc_lladdr = lagg_lacp_lladdr;
	sc->sc_req = lacp_req;
	sc->sc_portreq = lacp_portreq;

	error = lacp_attach(sc);
	if (error)
		return (error);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);

	return (error);
}
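
/*
 * Note: unlike the static modes above, LACP negotiates the aggregate with
 * the link partner using IEEE 802.3ad LACPDUs (received below as
 * ETHERTYPE_SLOW frames), so the peer switch must be configured for LACP
 * as well.
 */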

static int
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* unlocking is safe here */
	LAGG_WUNLOCK(sc);
	error = lacp_detach(sc);
	LAGG_WLOCK(sc);

	return (error);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	/* purge all the lacp ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);
	if (lp == NULL) {
		m_freem(m);
		return (EBUSY);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if (etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}