FreeBSD/Linux Kernel Cross Reference
sys/net/if_lagg.c
1 /* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD$");
22
23 #include "opt_inet.h"
24 #include "opt_inet6.h"
25
26 #include <sys/param.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/queue.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/module.h>
35 #include <sys/priv.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <sys/hash.h>
39 #include <sys/lock.h>
40 #include <sys/rwlock.h>
41 #include <sys/taskqueue.h>
42
43 #include <net/ethernet.h>
44 #include <net/if.h>
45 #include <net/if_clone.h>
46 #include <net/if_arp.h>
47 #include <net/if_dl.h>
48 #include <net/if_llc.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net/bpf.h>
53
54 #ifdef INET
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/if_ether.h>
58 #include <netinet/ip.h>
59 #endif
60
61 #ifdef INET6
62 #include <netinet/ip6.h>
63 #endif
64
65 #include <net/if_vlan_var.h>
66 #include <net/if_lagg.h>
67 #include <net/ieee8023ad_lacp.h>
68
69 /* Special flags we should propagate to the lagg ports. */
70 static struct {
71 int flag;
72 int (*func)(struct ifnet *, int);
73 } lagg_pflags[] = {
74 {IFF_PROMISC, ifpromisc},
75 {IFF_ALLMULTI, if_allmulti},
76 {0, NULL}
77 };
78
79 SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
80 static struct mtx lagg_list_mtx;
81 eventhandler_tag lagg_detach_cookie = NULL;
82
83 static int lagg_clone_create(struct if_clone *, int, caddr_t);
84 static void lagg_clone_destroy(struct ifnet *);
85 static void lagg_lladdr(struct lagg_softc *, uint8_t *);
86 static void lagg_capabilities(struct lagg_softc *);
87 static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
88 static void lagg_port_setlladdr(void *, int);
89 static int lagg_port_create(struct lagg_softc *, struct ifnet *);
90 static int lagg_port_destroy(struct lagg_port *, int);
91 static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
92 static void lagg_linkstate(struct lagg_softc *);
93 static void lagg_port_state(struct ifnet *, int);
94 static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
95 static int lagg_port_output(struct ifnet *, struct mbuf *,
96 struct sockaddr *, struct rtentry *);
97 static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
98 static int lagg_port_checkstacking(struct lagg_softc *);
99 static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
100 static void lagg_init(void *);
101 static void lagg_stop(struct lagg_softc *);
102 static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
103 static int lagg_ether_setmulti(struct lagg_softc *);
104 static int lagg_ether_cmdmulti(struct lagg_port *, int);
105 static int lagg_setflag(struct lagg_port *, int, int,
106 int (*func)(struct ifnet *, int));
107 static int lagg_setflags(struct lagg_port *, int status);
108 static void lagg_start(struct ifnet *);
109 static int lagg_media_change(struct ifnet *);
110 static void lagg_media_status(struct ifnet *, struct ifmediareq *);
111 static struct lagg_port *lagg_link_active(struct lagg_softc *,
112 struct lagg_port *);
113 static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
114
115 IFC_SIMPLE_DECLARE(lagg, 0);
116
117 /* Simple round robin */
118 static int lagg_rr_attach(struct lagg_softc *);
119 static int lagg_rr_detach(struct lagg_softc *);
120 static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
121 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
122 struct mbuf *);
123
124 /* Active failover */
125 static int lagg_fail_attach(struct lagg_softc *);
126 static int lagg_fail_detach(struct lagg_softc *);
127 static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
128 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
129 struct mbuf *);
130
131 /* Loadbalancing */
132 static int lagg_lb_attach(struct lagg_softc *);
133 static int lagg_lb_detach(struct lagg_softc *);
134 static int lagg_lb_port_create(struct lagg_port *);
135 static void lagg_lb_port_destroy(struct lagg_port *);
136 static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
137 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
138 struct mbuf *);
139 static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
140
141 /* 802.3ad LACP */
142 static int lagg_lacp_attach(struct lagg_softc *);
143 static int lagg_lacp_detach(struct lagg_softc *);
144 static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
145 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
146 struct mbuf *);
147 static void lagg_lacp_lladdr(struct lagg_softc *);
148
149 /* lagg protocol table */
150 static const struct {
151 int ti_proto;
152 int (*ti_attach)(struct lagg_softc *);
153 } lagg_protos[] = {
154 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
155 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
156 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
157 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
158 { LAGG_PROTO_LACP, lagg_lacp_attach },
159 { LAGG_PROTO_NONE, NULL }
160 };
161
162 SYSCTL_DECL(_net_link);
163 SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0, "Link Aggregation");
164
165 static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
166 SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
167 &lagg_failover_rx_all, 0,
168 "Accept input from any interface in a failover lagg");
169
170 static int
171 lagg_modevent(module_t mod, int type, void *data)
172 {
173
174 switch (type) {
175 case MOD_LOAD:
176 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
177 SLIST_INIT(&lagg_list);
178 if_clone_attach(&lagg_cloner);
179 lagg_input_p = lagg_input;
180 lagg_linkstate_p = lagg_port_state;
181 lagg_detach_cookie = EVENTHANDLER_REGISTER(
182 ifnet_departure_event, lagg_port_ifdetach, NULL,
183 EVENTHANDLER_PRI_ANY);
184 break;
185 case MOD_UNLOAD:
186 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
187 lagg_detach_cookie);
188 if_clone_detach(&lagg_cloner);
189 lagg_input_p = NULL;
190 lagg_linkstate_p = NULL;
191 mtx_destroy(&lagg_list_mtx);
192 break;
193 default:
194 return (EOPNOTSUPP);
195 }
196 return (0);
197 }
198
199 static moduledata_t lagg_mod = {
200 "if_lagg",
201 lagg_modevent,
202 0
203 };
204
205 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
206 MODULE_VERSION(if_lagg, 1);
207
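/*
 * if_clone create handler: allocate the softc, attach the default
 * aggregation protocol, set up the pseudo Ethernet interface and add
 * it to the global list of laggs.
 */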
208 static int
209 lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
210 {
211 struct lagg_softc *sc;
212 struct ifnet *ifp;
213 int i, error = 0;
214 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
215
216 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
217 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
218 if (ifp == NULL) {
219 free(sc, M_DEVBUF);
220 return (ENOSPC);
221 }
222
223 sc->sc_proto = LAGG_PROTO_NONE;
224 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
225 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
226 sc->sc_proto = lagg_protos[i].ti_proto;
227 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
228 if_free_type(ifp, IFT_ETHER);
229 free(sc, M_DEVBUF);
230 return (error);
231 }
232 break;
233 }
234 }
235 LAGG_LOCK_INIT(sc);
236 SLIST_INIT(&sc->sc_ports);
237 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
238
239 /* Initialise pseudo media types */
240 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
241 lagg_media_status);
242 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
243 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
244
245 if_initname(ifp, ifc->ifc_name, unit);
246 ifp->if_type = IFT_ETHER;
247 ifp->if_softc = sc;
248 ifp->if_start = lagg_start;
249 ifp->if_init = lagg_init;
250 ifp->if_ioctl = lagg_ioctl;
251 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
252
253 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
254 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
255 IFQ_SET_READY(&ifp->if_snd);
256
257 /*
258 	 * Attach as an ordinary ethernet device; children will be attached
259 * as special device IFT_IEEE8023ADLAG.
260 */
261 ether_ifattach(ifp, eaddr);
262
263 /* Insert into the global list of laggs */
264 mtx_lock(&lagg_list_mtx);
265 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
266 mtx_unlock(&lagg_list_mtx);
267
268 return (0);
269 }
270
271 static void
272 lagg_clone_destroy(struct ifnet *ifp)
273 {
274 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
275 struct lagg_port *lp;
276
277 LAGG_WLOCK(sc);
278
279 lagg_stop(sc);
280 ifp->if_flags &= ~IFF_UP;
281
282 /* Shutdown and remove lagg ports */
283 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
284 lagg_port_destroy(lp, 1);
285 /* Unhook the aggregation protocol */
286 (*sc->sc_detach)(sc);
287
288 LAGG_WUNLOCK(sc);
289
290 ifmedia_removeall(&sc->sc_media);
291 ether_ifdetach(ifp);
292 if_free_type(ifp, IFT_ETHER);
293
294 mtx_lock(&lagg_list_mtx);
295 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
296 mtx_unlock(&lagg_list_mtx);
297
298 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
299 LAGG_LOCK_DESTROY(sc);
300 free(sc, M_DEVBUF);
301 }
302
303 static void
304 lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
305 {
306 struct ifnet *ifp = sc->sc_ifp;
307
308 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
309 return;
310
311 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
312 /* Let the protocol know the MAC has changed */
313 if (sc->sc_lladdr != NULL)
314 (*sc->sc_lladdr)(sc);
315 }
316
317 static void
318 lagg_capabilities(struct lagg_softc *sc)
319 {
320 struct lagg_port *lp;
321 int cap = ~0, ena = ~0;
322
323 LAGG_WLOCK_ASSERT(sc);
324
325 /* Get capabilities from the lagg ports */
326 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
327 cap &= lp->lp_ifp->if_capabilities;
328 ena &= lp->lp_ifp->if_capenable;
329 }
330 cap = (cap == ~0 ? 0 : cap);
331 ena = (ena == ~0 ? 0 : ena);
332
333 if (sc->sc_ifp->if_capabilities != cap ||
334 sc->sc_ifp->if_capenable != ena) {
335 sc->sc_ifp->if_capabilities = cap;
336 sc->sc_ifp->if_capenable = ena;
337 getmicrotime(&sc->sc_ifp->if_lastchange);
338
339 if (sc->sc_ifflags & IFF_DEBUG)
340 if_printf(sc->sc_ifp,
341 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
342 }
343 }
344
345 static void
346 lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
347 {
348 struct lagg_softc *sc = lp->lp_softc;
349 struct ifnet *ifp = lp->lp_ifp;
350 struct lagg_llq *llq;
351 int pending = 0;
352
353 LAGG_WLOCK_ASSERT(sc);
354
355 if (lp->lp_detaching ||
356 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
357 return;
358
359 	/* Check to make sure it's not already queued to be changed */
360 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
361 if (llq->llq_ifp == ifp) {
362 pending = 1;
363 break;
364 }
365 }
366
367 if (!pending) {
368 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
369 if (llq == NULL) /* XXX what to do */
370 return;
371 }
372
373 /* Update the lladdr even if pending, it may have changed */
374 llq->llq_ifp = ifp;
375 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
376
377 if (!pending)
378 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
379
380 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
381 }
382
383 /*
384 * Set the interface MAC address from a taskqueue to avoid a LOR.
385 */
386 static void
387 lagg_port_setlladdr(void *arg, int pending)
388 {
389 struct lagg_softc *sc = (struct lagg_softc *)arg;
390 struct lagg_llq *llq, *head;
391 struct ifnet *ifp;
392 int error;
393
394 	/* Grab a local reference to the queue and remove it from the softc */
395 LAGG_WLOCK(sc);
396 head = SLIST_FIRST(&sc->sc_llq_head);
397 SLIST_FIRST(&sc->sc_llq_head) = NULL;
398 LAGG_WUNLOCK(sc);
399
400 /*
401 * Traverse the queue and set the lladdr on each ifp. It is safe to do
402 * unlocked as we have the only reference to it.
403 */
404 for (llq = head; llq != NULL; llq = head) {
405 ifp = llq->llq_ifp;
406
407 /* Set the link layer address */
408 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
409 if (error)
410 printf("%s: setlladdr failed on %s\n", __func__,
411 ifp->if_xname);
412
413 head = SLIST_NEXT(llq, llq_entries);
414 free(llq, M_DEVBUF);
415 }
416 }
417
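/*
 * Attach ifp to the lagg as a new port: check the MTU, take over its
 * ioctl and output handlers, sync the MAC address and multicast
 * filters, and notify the active aggregation protocol.
 */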
418 static int
419 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
420 {
421 struct lagg_softc *sc_ptr;
422 struct lagg_port *lp;
423 int error = 0;
424
425 LAGG_WLOCK_ASSERT(sc);
426
427 	/* Limit the maximum number of lagg ports */
428 if (sc->sc_count >= LAGG_MAX_PORTS)
429 return (ENOSPC);
430
431 	/* Check if the port is already associated with a lagg */
432 if (ifp->if_lagg != NULL)
433 return (EBUSY);
434
435 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
436 if (ifp->if_type != IFT_ETHER)
437 return (EPROTONOSUPPORT);
438
439 /* Allow the first Ethernet member to define the MTU */
440 if (SLIST_EMPTY(&sc->sc_ports))
441 sc->sc_ifp->if_mtu = ifp->if_mtu;
442 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
443 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
444 ifp->if_xname);
445 return (EINVAL);
446 }
447
448 if ((lp = malloc(sizeof(struct lagg_port),
449 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
450 return (ENOMEM);
451
452 /* Check if port is a stacked lagg */
453 mtx_lock(&lagg_list_mtx);
454 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
455 if (ifp == sc_ptr->sc_ifp) {
456 mtx_unlock(&lagg_list_mtx);
457 free(lp, M_DEVBUF);
458 return (EINVAL);
459 			/* XXX disable stacking for the moment, it's untested
460 lp->lp_flags |= LAGG_PORT_STACK;
461 if (lagg_port_checkstacking(sc_ptr) >=
462 LAGG_MAX_STACKING) {
463 mtx_unlock(&lagg_list_mtx);
464 free(lp, M_DEVBUF);
465 return (E2BIG);
466 }
467 */
468 }
469 }
470 mtx_unlock(&lagg_list_mtx);
471
472 /* Change the interface type */
473 lp->lp_iftype = ifp->if_type;
474 ifp->if_type = IFT_IEEE8023ADLAG;
475 ifp->if_lagg = lp;
476 lp->lp_ioctl = ifp->if_ioctl;
477 ifp->if_ioctl = lagg_port_ioctl;
478 lp->lp_output = ifp->if_output;
479 ifp->if_output = lagg_port_output;
480
481 lp->lp_ifp = ifp;
482 lp->lp_softc = sc;
483
484 /* Save port link layer address */
485 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
486
487 if (SLIST_EMPTY(&sc->sc_ports)) {
488 sc->sc_primary = lp;
489 lagg_lladdr(sc, IF_LLADDR(ifp));
490 } else {
491 /* Update link layer address for this port */
492 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
493 }
494
495 /* Insert into the list of ports */
496 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
497 sc->sc_count++;
498
499 /* Update lagg capabilities */
500 lagg_capabilities(sc);
501 lagg_linkstate(sc);
502
503 /* Add multicast addresses and interface flags to this port */
504 lagg_ether_cmdmulti(lp, 1);
505 lagg_setflags(lp, 1);
506
507 if (sc->sc_port_create != NULL)
508 error = (*sc->sc_port_create)(lp);
509 if (error) {
510 /* remove the port again, without calling sc_port_destroy */
511 lagg_port_destroy(lp, 0);
512 return (error);
513 }
514
515 return (error);
516 }
517
518 static int
519 lagg_port_checkstacking(struct lagg_softc *sc)
520 {
521 struct lagg_softc *sc_ptr;
522 struct lagg_port *lp;
523 int m = 0;
524
525 LAGG_WLOCK_ASSERT(sc);
526
527 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
528 if (lp->lp_flags & LAGG_PORT_STACK) {
529 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
530 m = MAX(m, lagg_port_checkstacking(sc_ptr));
531 }
532 }
533
534 return (m + 1);
535 }
536
537 static int
538 lagg_port_destroy(struct lagg_port *lp, int runpd)
539 {
540 struct lagg_softc *sc = lp->lp_softc;
541 struct lagg_port *lp_ptr;
542 struct lagg_llq *llq;
543 struct ifnet *ifp = lp->lp_ifp;
544
545 LAGG_WLOCK_ASSERT(sc);
546
547 if (runpd && sc->sc_port_destroy != NULL)
548 (*sc->sc_port_destroy)(lp);
549
550 /*
551 * Remove multicast addresses and interface flags from this port and
552 * reset the MAC address, skip if the interface is being detached.
553 */
554 if (!lp->lp_detaching) {
555 lagg_ether_cmdmulti(lp, 0);
556 lagg_setflags(lp, 0);
557 lagg_port_lladdr(lp, lp->lp_lladdr);
558 }
559
560 /* Restore interface */
561 ifp->if_type = lp->lp_iftype;
562 ifp->if_ioctl = lp->lp_ioctl;
563 ifp->if_output = lp->lp_output;
564 ifp->if_lagg = NULL;
565
566 /* Finally, remove the port from the lagg */
567 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
568 sc->sc_count--;
569
570 /* Update the primary interface */
571 if (lp == sc->sc_primary) {
572 uint8_t lladdr[ETHER_ADDR_LEN];
573
574 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
575 bzero(&lladdr, ETHER_ADDR_LEN);
576 } else {
577 bcopy(lp_ptr->lp_lladdr,
578 lladdr, ETHER_ADDR_LEN);
579 }
580 lagg_lladdr(sc, lladdr);
581 sc->sc_primary = lp_ptr;
582
583 /* Update link layer address for each port */
584 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
585 lagg_port_lladdr(lp_ptr, lladdr);
586 }
587
588 /* Remove any pending lladdr changes from the queue */
589 if (lp->lp_detaching) {
590 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
591 if (llq->llq_ifp == ifp) {
592 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
593 llq_entries);
594 free(llq, M_DEVBUF);
595 break; /* Only appears once */
596 }
597 }
598 }
599
600 if (lp->lp_ifflags)
601 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
602
603 free(lp, M_DEVBUF);
604
605 /* Update lagg capabilities */
606 lagg_capabilities(sc);
607 lagg_linkstate(sc);
608
609 return (0);
610 }
611
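/*
 * ioctl handler installed on member ports: serves the lagg-specific
 * requests and falls back to the port's original handler otherwise.
 */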
612 static int
613 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
614 {
615 struct lagg_reqport *rp = (struct lagg_reqport *)data;
616 struct lagg_softc *sc;
617 struct lagg_port *lp = NULL;
618 int error = 0;
619
620 /* Should be checked by the caller */
621 if (ifp->if_type != IFT_IEEE8023ADLAG ||
622 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
623 goto fallback;
624
625 switch (cmd) {
626 case SIOCGLAGGPORT:
627 if (rp->rp_portname[0] == '\0' ||
628 ifunit(rp->rp_portname) != ifp) {
629 error = EINVAL;
630 break;
631 }
632
633 LAGG_RLOCK(sc);
634 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
635 error = ENOENT;
636 LAGG_RUNLOCK(sc);
637 break;
638 }
639
640 lagg_port2req(lp, rp);
641 LAGG_RUNLOCK(sc);
642 break;
643
644 case SIOCSIFCAP:
645 if (lp->lp_ioctl == NULL) {
646 error = EINVAL;
647 break;
648 }
649 error = (*lp->lp_ioctl)(ifp, cmd, data);
650 if (error)
651 break;
652
653 /* Update lagg interface capabilities */
654 LAGG_WLOCK(sc);
655 lagg_capabilities(sc);
656 LAGG_WUNLOCK(sc);
657 break;
658
659 case SIOCSIFMTU:
660 /* Do not allow the MTU to be changed once joined */
661 error = EINVAL;
662 break;
663
664 default:
665 goto fallback;
666 }
667
668 return (error);
669
670 fallback:
671 	if (lp != NULL && lp->lp_ioctl != NULL)
672 return ((*lp->lp_ioctl)(ifp, cmd, data));
673
674 return (EINVAL);
675 }
676
677 static int
678 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
679 struct sockaddr *dst, struct rtentry *rt0)
680 {
681 struct lagg_port *lp = ifp->if_lagg;
682 struct ether_header *eh;
683 short type = 0;
684
685 switch (dst->sa_family) {
686 case pseudo_AF_HDRCMPLT:
687 case AF_UNSPEC:
688 eh = (struct ether_header *)dst->sa_data;
689 type = eh->ether_type;
690 break;
691 }
692
693 /*
694 * Only allow ethernet types required to initiate or maintain the link,
695 * aggregated frames take a different path.
696 */
697 switch (ntohs(type)) {
698 case ETHERTYPE_PAE: /* EAPOL PAE/802.1x */
699 return ((*lp->lp_output)(ifp, m, dst, rt0));
700 }
701
702 /* drop any other frames */
703 m_freem(m);
704 return (EBUSY);
705 }
706
707 static void
708 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
709 {
710 struct lagg_port *lp;
711 struct lagg_softc *sc;
712
713 if ((lp = ifp->if_lagg) == NULL)
714 return;
715
716 sc = lp->lp_softc;
717
718 LAGG_WLOCK(sc);
719 lp->lp_detaching = 1;
720 lagg_port_destroy(lp, 1);
721 LAGG_WUNLOCK(sc);
722 }
723
724 static void
725 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
726 {
727 struct lagg_softc *sc = lp->lp_softc;
728
729 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
730 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
731 rp->rp_prio = lp->lp_prio;
732 rp->rp_flags = lp->lp_flags;
733 if (sc->sc_portreq != NULL)
734 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
735
736 /* Add protocol specific flags */
737 switch (sc->sc_proto) {
738 case LAGG_PROTO_FAILOVER:
739 if (lp == sc->sc_primary)
740 rp->rp_flags |= LAGG_PORT_MASTER;
741 if (lp == lagg_link_active(sc, sc->sc_primary))
742 rp->rp_flags |= LAGG_PORT_ACTIVE;
743 break;
744
745 case LAGG_PROTO_ROUNDROBIN:
746 case LAGG_PROTO_LOADBALANCE:
747 case LAGG_PROTO_ETHERCHANNEL:
748 if (LAGG_PORTACTIVE(lp))
749 rp->rp_flags |= LAGG_PORT_ACTIVE;
750 break;
751
752 case LAGG_PROTO_LACP:
753 /* LACP has a different definition of active */
754 if (lacp_isactive(lp))
755 rp->rp_flags |= LAGG_PORT_ACTIVE;
756 if (lacp_iscollecting(lp))
757 rp->rp_flags |= LAGG_PORT_COLLECTING;
758 if (lacp_isdistributing(lp))
759 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
760 break;
761 }
762
763 }
764
765 static void
766 lagg_init(void *xsc)
767 {
768 struct lagg_softc *sc = (struct lagg_softc *)xsc;
769 struct lagg_port *lp;
770 struct ifnet *ifp = sc->sc_ifp;
771
772 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
773 return;
774
775 LAGG_WLOCK(sc);
776
777 ifp->if_drv_flags |= IFF_DRV_RUNNING;
778 /* Update the port lladdrs */
779 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
780 lagg_port_lladdr(lp, IF_LLADDR(ifp));
781
782 if (sc->sc_init != NULL)
783 (*sc->sc_init)(sc);
784
785 LAGG_WUNLOCK(sc);
786 }
787
788 static void
789 lagg_stop(struct lagg_softc *sc)
790 {
791 struct ifnet *ifp = sc->sc_ifp;
792
793 LAGG_WLOCK_ASSERT(sc);
794
795 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
796 return;
797
798 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
799
800 if (sc->sc_stop != NULL)
801 (*sc->sc_stop)(sc);
802 }
803
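/*
 * ioctl handler for the lagg interface itself: protocol selection,
 * port add/remove, status queries and the usual interface requests.
 */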
804 static int
805 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
806 {
807 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
808 struct lagg_reqall *ra = (struct lagg_reqall *)data;
809 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
810 struct ifreq *ifr = (struct ifreq *)data;
811 struct lagg_port *lp;
812 struct ifnet *tpif;
813 struct thread *td = curthread;
814 char *buf, *outbuf;
815 int count, buflen, len, error = 0;
816
817 bzero(&rpbuf, sizeof(rpbuf));
818
819 switch (cmd) {
820 case SIOCGLAGG:
821 LAGG_RLOCK(sc);
822 count = 0;
823 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
824 count++;
825 buflen = count * sizeof(struct lagg_reqport);
826 LAGG_RUNLOCK(sc);
827
828 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
829
830 LAGG_RLOCK(sc);
831 ra->ra_proto = sc->sc_proto;
832 if (sc->sc_req != NULL)
833 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
834
835 count = 0;
836 buf = outbuf;
837 len = min(ra->ra_size, buflen);
838 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
839 if (len < sizeof(rpbuf))
840 break;
841
842 lagg_port2req(lp, &rpbuf);
843 memcpy(buf, &rpbuf, sizeof(rpbuf));
844 count++;
845 buf += sizeof(rpbuf);
846 len -= sizeof(rpbuf);
847 }
848 LAGG_RUNLOCK(sc);
849 ra->ra_ports = count;
850 ra->ra_size = count * sizeof(rpbuf);
851 error = copyout(outbuf, ra->ra_port, ra->ra_size);
852 free(outbuf, M_TEMP);
853 break;
854 case SIOCSLAGG:
855 error = priv_check(td, PRIV_NET_LAGG);
856 if (error)
857 break;
858 if (ra->ra_proto >= LAGG_PROTO_MAX) {
859 error = EPROTONOSUPPORT;
860 break;
861 }
862 if (sc->sc_proto != LAGG_PROTO_NONE) {
863 LAGG_WLOCK(sc);
864 error = sc->sc_detach(sc);
865 /* Reset protocol and pointers */
866 sc->sc_proto = LAGG_PROTO_NONE;
867 sc->sc_detach = NULL;
868 sc->sc_start = NULL;
869 sc->sc_input = NULL;
870 sc->sc_port_create = NULL;
871 sc->sc_port_destroy = NULL;
872 sc->sc_linkstate = NULL;
873 sc->sc_init = NULL;
874 sc->sc_stop = NULL;
875 sc->sc_lladdr = NULL;
876 sc->sc_req = NULL;
877 sc->sc_portreq = NULL;
878 LAGG_WUNLOCK(sc);
879 }
880 if (error != 0)
881 break;
882 for (int i = 0; i < (sizeof(lagg_protos) /
883 sizeof(lagg_protos[0])); i++) {
884 if (lagg_protos[i].ti_proto == ra->ra_proto) {
885 if (sc->sc_ifflags & IFF_DEBUG)
886 printf("%s: using proto %u\n",
887 sc->sc_ifname,
888 lagg_protos[i].ti_proto);
889 LAGG_WLOCK(sc);
890 sc->sc_proto = lagg_protos[i].ti_proto;
891 if (sc->sc_proto != LAGG_PROTO_NONE)
892 error = lagg_protos[i].ti_attach(sc);
893 LAGG_WUNLOCK(sc);
894 return (error);
895 }
896 }
897 error = EPROTONOSUPPORT;
898 break;
899 case SIOCGLAGGPORT:
900 if (rp->rp_portname[0] == '\0' ||
901 (tpif = ifunit(rp->rp_portname)) == NULL) {
902 error = EINVAL;
903 break;
904 }
905
906 LAGG_RLOCK(sc);
907 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
908 lp->lp_softc != sc) {
909 error = ENOENT;
910 LAGG_RUNLOCK(sc);
911 break;
912 }
913
914 lagg_port2req(lp, rp);
915 LAGG_RUNLOCK(sc);
916 break;
917 case SIOCSLAGGPORT:
918 error = priv_check(td, PRIV_NET_LAGG);
919 if (error)
920 break;
921 if (rp->rp_portname[0] == '\0' ||
922 (tpif = ifunit(rp->rp_portname)) == NULL) {
923 error = EINVAL;
924 break;
925 }
926 LAGG_WLOCK(sc);
927 error = lagg_port_create(sc, tpif);
928 LAGG_WUNLOCK(sc);
929 break;
930 case SIOCSLAGGDELPORT:
931 error = priv_check(td, PRIV_NET_LAGG);
932 if (error)
933 break;
934 if (rp->rp_portname[0] == '\0' ||
935 (tpif = ifunit(rp->rp_portname)) == NULL) {
936 error = EINVAL;
937 break;
938 }
939
940 LAGG_WLOCK(sc);
941 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
942 lp->lp_softc != sc) {
943 error = ENOENT;
944 LAGG_WUNLOCK(sc);
945 break;
946 }
947
948 error = lagg_port_destroy(lp, 1);
949 LAGG_WUNLOCK(sc);
950 break;
951 case SIOCSIFFLAGS:
952 /* Set flags on ports too */
953 LAGG_WLOCK(sc);
954 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
955 lagg_setflags(lp, 1);
956 }
957 LAGG_WUNLOCK(sc);
958
959 if (!(ifp->if_flags & IFF_UP) &&
960 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
961 /*
962 * If interface is marked down and it is running,
963 * then stop and disable it.
964 */
965 LAGG_WLOCK(sc);
966 lagg_stop(sc);
967 LAGG_WUNLOCK(sc);
968 } else if ((ifp->if_flags & IFF_UP) &&
969 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
970 /*
971 * If interface is marked up and it is stopped, then
972 * start it.
973 */
974 (*ifp->if_init)(sc);
975 }
976 break;
977 case SIOCADDMULTI:
978 case SIOCDELMULTI:
979 LAGG_WLOCK(sc);
980 error = lagg_ether_setmulti(sc);
981 LAGG_WUNLOCK(sc);
982 break;
983 case SIOCSIFMEDIA:
984 case SIOCGIFMEDIA:
985 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
986 break;
987
988 case SIOCSIFCAP:
989 case SIOCSIFMTU:
990 /* Do not allow the MTU or caps to be directly changed */
991 error = EINVAL;
992 break;
993
994 default:
995 error = ether_ioctl(ifp, cmd, data);
996 break;
997 }
998 return (error);
999 }
1000
1001 static int
1002 lagg_ether_setmulti(struct lagg_softc *sc)
1003 {
1004 struct lagg_port *lp;
1005
1006 LAGG_WLOCK_ASSERT(sc);
1007
1008 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1009 /* First, remove any existing filter entries. */
1010 lagg_ether_cmdmulti(lp, 0);
1011 /* copy all addresses from the lagg interface to the port */
1012 lagg_ether_cmdmulti(lp, 1);
1013 }
1014 return (0);
1015 }
1016
1017 static int
1018 lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1019 {
1020 struct lagg_softc *sc = lp->lp_softc;
1021 struct ifnet *ifp = lp->lp_ifp;
1022 struct ifnet *scifp = sc->sc_ifp;
1023 struct lagg_mc *mc;
1024 struct ifmultiaddr *ifma, *rifma = NULL;
1025 struct sockaddr_dl sdl;
1026 int error;
1027
1028 LAGG_WLOCK_ASSERT(sc);
1029
1030 bzero((char *)&sdl, sizeof(sdl));
1031 sdl.sdl_len = sizeof(sdl);
1032 sdl.sdl_family = AF_LINK;
1033 sdl.sdl_type = IFT_ETHER;
1034 sdl.sdl_alen = ETHER_ADDR_LEN;
1035 sdl.sdl_index = ifp->if_index;
1036
1037 if (set) {
1038 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1039 if (ifma->ifma_addr->sa_family != AF_LINK)
1040 continue;
1041 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1042 LLADDR(&sdl), ETHER_ADDR_LEN);
1043
1044 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1045 if (error)
1046 return (error);
1047 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1048 if (mc == NULL)
1049 return (ENOMEM);
1050 mc->mc_ifma = rifma;
1051 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1052 }
1053 } else {
1054 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1055 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1056 if_delmulti_ifma(mc->mc_ifma);
1057 free(mc, M_DEVBUF);
1058 }
1059 }
1060 return (0);
1061 }
1062
1063 /* Handle a ref counted flag that should be set on the lagg port as well */
1064 static int
1065 lagg_setflag(struct lagg_port *lp, int flag, int status,
1066 int (*func)(struct ifnet *, int))
1067 {
1068 struct lagg_softc *sc = lp->lp_softc;
1069 struct ifnet *scifp = sc->sc_ifp;
1070 struct ifnet *ifp = lp->lp_ifp;
1071 int error;
1072
1073 LAGG_WLOCK_ASSERT(sc);
1074
1075 status = status ? (scifp->if_flags & flag) : 0;
1076 /* Now "status" contains the flag value or 0 */
1077
1078 /*
1079 	 * See if the recorded port status differs from what we
1080 	 * want it to be. If it does, flip it. We record the port's
1081 	 * status in lp_ifflags so that we won't clear a port flag
1082 	 * we haven't set. In fact, we don't clear or set port
1083 	 * flags directly, but get or release references to them.
1084 	 * That's why we can be sure that the recorded flags are
1085 	 * still in accord with the actual port flags.
1086 */
1087 if (status != (lp->lp_ifflags & flag)) {
1088 error = (*func)(ifp, status);
1089 if (error)
1090 return (error);
1091 lp->lp_ifflags &= ~flag;
1092 lp->lp_ifflags |= status;
1093 }
1094 return (0);
1095 }
1096
1097 /*
1098  * Handle IFF_* flags that require certain changes on the lagg port:
1099  * if "status" is true, update the port's flags to match those of the lagg;
1100  * if "status" is false, forcibly clear the flags set on the port.
1101 */
1102 static int
1103 lagg_setflags(struct lagg_port *lp, int status)
1104 {
1105 int error, i;
1106
1107 for (i = 0; lagg_pflags[i].flag; i++) {
1108 error = lagg_setflag(lp, lagg_pflags[i].flag,
1109 status, lagg_pflags[i].func);
1110 if (error)
1111 return (error);
1112 }
1113 return (0);
1114 }
1115
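/*
 * if_start handler for the lagg interface: drain the send queue and
 * pass each frame to the active protocol's transmit routine.
 */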
1116 static void
1117 lagg_start(struct ifnet *ifp)
1118 {
1119 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1120 struct mbuf *m;
1121 int error = 0;
1122
1123 LAGG_RLOCK(sc);
1124 for (;; error = 0) {
1125 IFQ_DEQUEUE(&ifp->if_snd, m);
1126 if (m == NULL)
1127 break;
1128
1129 ETHER_BPF_MTAP(ifp, m);
1130
1131 /* We need a Tx algorithm and at least one port */
1132 if (sc->sc_proto != LAGG_PROTO_NONE && sc->sc_count)
1133 error = (*sc->sc_start)(sc, m);
1134 else
1135 m_freem(m);
1136
1137 if (error == 0)
1138 ifp->if_opackets++;
1139 else
1140 ifp->if_oerrors++;
1141 }
1142 LAGG_RUNLOCK(sc);
1143
1144 return;
1145 }
1146
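/*
 * Receive hook for frames arriving on a member port (hooked up through
 * lagg_input_p); hands the mbuf to the active protocol's input routine.
 */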
1147 static struct mbuf *
1148 lagg_input(struct ifnet *ifp, struct mbuf *m)
1149 {
1150 struct lagg_port *lp = ifp->if_lagg;
1151 struct lagg_softc *sc = lp->lp_softc;
1152 struct ifnet *scifp = sc->sc_ifp;
1153
1154 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1155 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1156 sc->sc_proto == LAGG_PROTO_NONE) {
1157 m_freem(m);
1158 return (NULL);
1159 }
1160
1161 LAGG_RLOCK(sc);
1162 ETHER_BPF_MTAP(scifp, m);
1163
1164 m = (*sc->sc_input)(sc, lp, m);
1165
1166 if (m != NULL) {
1167 scifp->if_ipackets++;
1168 scifp->if_ibytes += m->m_pkthdr.len;
1169
1170 if (scifp->if_flags & IFF_MONITOR) {
1171 m_freem(m);
1172 m = NULL;
1173 }
1174 }
1175
1176 LAGG_RUNLOCK(sc);
1177 return (m);
1178 }
1179
1180 static int
1181 lagg_media_change(struct ifnet *ifp)
1182 {
1183 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1184
1185 if (sc->sc_ifflags & IFF_DEBUG)
1186 printf("%s\n", __func__);
1187
1188 /* Ignore */
1189 return (0);
1190 }
1191
1192 static void
1193 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1194 {
1195 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1196 struct lagg_port *lp;
1197
1198 imr->ifm_status = IFM_AVALID;
1199 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1200
1201 LAGG_RLOCK(sc);
1202 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1203 if (LAGG_PORTACTIVE(lp))
1204 imr->ifm_status |= IFM_ACTIVE;
1205 }
1206 LAGG_RUNLOCK(sc);
1207 }
1208
1209 static void
1210 lagg_linkstate(struct lagg_softc *sc)
1211 {
1212 struct lagg_port *lp;
1213 int new_link = LINK_STATE_DOWN;
1214
1215 /* Our link is considered up if at least one of our ports is active */
1216 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1217 if (lp->lp_link_state == LINK_STATE_UP) {
1218 new_link = LINK_STATE_UP;
1219 break;
1220 }
1221 }
1222 if_link_state_change(sc->sc_ifp, new_link);
1223 }
1224
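/*
 * Link-state change callback for member ports (hooked up through
 * lagg_linkstate_p); refreshes the lagg link state and notifies the
 * protocol.
 */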
1225 static void
1226 lagg_port_state(struct ifnet *ifp, int state)
1227 {
1228 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1229 struct lagg_softc *sc = NULL;
1230
1231 if (lp != NULL)
1232 sc = lp->lp_softc;
1233 if (sc == NULL)
1234 return;
1235
1236 LAGG_WLOCK(sc);
1237 lagg_linkstate(sc);
1238 if (sc->sc_linkstate != NULL)
1239 (*sc->sc_linkstate)(lp);
1240 LAGG_WUNLOCK(sc);
1241 }
1242
1243 struct lagg_port *
1244 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1245 {
1246 struct lagg_port *lp_next, *rval = NULL;
1247 // int new_link = LINK_STATE_DOWN;
1248
1249 LAGG_RLOCK_ASSERT(sc);
1250 /*
1251 	 * Search for a port which reports an active link state.
1252 */
1253
1254 if (lp == NULL)
1255 goto search;
1256 if (LAGG_PORTACTIVE(lp)) {
1257 rval = lp;
1258 goto found;
1259 }
1260 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1261 LAGG_PORTACTIVE(lp_next)) {
1262 rval = lp_next;
1263 goto found;
1264 }
1265
1266 search:
1267 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1268 if (LAGG_PORTACTIVE(lp_next)) {
1269 rval = lp_next;
1270 goto found;
1271 }
1272 }
1273
1274 found:
1275 if (rval != NULL) {
1276 /*
1277 * The IEEE 802.1D standard assumes that a lagg with
1278 * multiple ports is always full duplex. This is valid
1279 * for load sharing laggs and if at least two links
1280 * are active. Unfortunately, checking the latter would
1281 * be too expensive at this point.
1282 XXX
1283 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1284 (sc->sc_count > 1))
1285 new_link = LINK_STATE_FULL_DUPLEX;
1286 else
1287 new_link = rval->lp_link_state;
1288 */
1289 }
1290
1291 return (rval);
1292 }
1293
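/*
 * Return a pointer to len bytes of header at offset off, copying them
 * into buf when the requested bytes are not contiguous in the first mbuf.
 */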
1294 static const void *
1295 lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1296 {
1297 if (m->m_pkthdr.len < (off + len)) {
1298 return (NULL);
1299 } else if (m->m_len < (off + len)) {
1300 m_copydata(m, off, len, buf);
1301 return (buf);
1302 }
1303 return (mtod(m, char *) + off);
1304 }
1305
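/*
 * Hash a frame's Ethernet addresses, any VLAN tag and, when present,
 * its IPv4/IPv6 addresses (plus the IPv6 flow label) into a 32-bit
 * value; the loadbalance protocol uses this to pick a transmit port.
 */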
1306 uint32_t
1307 lagg_hashmbuf(struct mbuf *m, uint32_t key)
1308 {
1309 uint16_t etype;
1310 uint32_t p = 0;
1311 int off;
1312 struct ether_header *eh;
1313 struct ether_vlan_header vlanbuf;
1314 const struct ether_vlan_header *vlan;
1315 #ifdef INET
1316 const struct ip *ip;
1317 struct ip ipbuf;
1318 #endif
1319 #ifdef INET6
1320 const struct ip6_hdr *ip6;
1321 struct ip6_hdr ip6buf;
1322 uint32_t flow;
1323 #endif
1324
1325 off = sizeof(*eh);
1326 if (m->m_len < off)
1327 goto out;
1328 eh = mtod(m, struct ether_header *);
1329 etype = ntohs(eh->ether_type);
1330 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
1331 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1332
1333 /* Special handling for encapsulating VLAN frames */
1334 if (m->m_flags & M_VLANTAG) {
1335 p = hash32_buf(&m->m_pkthdr.ether_vtag,
1336 sizeof(m->m_pkthdr.ether_vtag), p);
1337 } else if (etype == ETHERTYPE_VLAN) {
1338 vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
1339 if (vlan == NULL)
1340 goto out;
1341
1342 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1343 etype = ntohs(vlan->evl_proto);
1344 off += sizeof(*vlan) - sizeof(*eh);
1345 }
1346
1347 switch (etype) {
1348 #ifdef INET
1349 case ETHERTYPE_IP:
1350 ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
1351 if (ip == NULL)
1352 goto out;
1353
1354 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1355 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1356 break;
1357 #endif
1358 #ifdef INET6
1359 case ETHERTYPE_IPV6:
1360 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
1361 if (ip6 == NULL)
1362 goto out;
1363
1364 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1365 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1366 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1367 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
1368 break;
1369 #endif
1370 }
1371 out:
1372 return (p);
1373 }
1374
1375 int
1376 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1377 {
1378 int error = 0;
1379
1380 IFQ_HANDOFF(ifp, m, error);
1381 if (error)
1382 ifp->if_oerrors++;
1383 return (error);
1384 }
1385
1386 /*
1387 * Simple round robin aggregation
1388 */
1389
1390 static int
1391 lagg_rr_attach(struct lagg_softc *sc)
1392 {
1393 sc->sc_detach = lagg_rr_detach;
1394 sc->sc_start = lagg_rr_start;
1395 sc->sc_input = lagg_rr_input;
1396 sc->sc_port_create = NULL;
1397 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1398 sc->sc_seq = 0;
1399
1400 return (0);
1401 }
1402
1403 static int
1404 lagg_rr_detach(struct lagg_softc *sc)
1405 {
1406 return (0);
1407 }
1408
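/*
 * Round-robin transmit: pick the next port using an atomic sequence
 * counter, falling over to the next active port if that link is down.
 */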
1409 static int
1410 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1411 {
1412 struct lagg_port *lp;
1413 uint32_t p;
1414
1415 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1416 p %= sc->sc_count;
1417 lp = SLIST_FIRST(&sc->sc_ports);
1418 while (p--)
1419 lp = SLIST_NEXT(lp, lp_entries);
1420
1421 /*
1422 * Check the port's link state. This will return the next active
1423 * port if the link is down or the port is NULL.
1424 */
1425 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1426 m_freem(m);
1427 return (ENOENT);
1428 }
1429
1430 /* Send mbuf */
1431 return (lagg_enqueue(lp->lp_ifp, m));
1432 }
1433
1434 static struct mbuf *
1435 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1436 {
1437 struct ifnet *ifp = sc->sc_ifp;
1438
1439 /* Just pass in the packet to our lagg device */
1440 m->m_pkthdr.rcvif = ifp;
1441
1442 return (m);
1443 }
1444
1445 /*
1446 * Active failover
1447 */
1448
1449 static int
1450 lagg_fail_attach(struct lagg_softc *sc)
1451 {
1452 sc->sc_detach = lagg_fail_detach;
1453 sc->sc_start = lagg_fail_start;
1454 sc->sc_input = lagg_fail_input;
1455 sc->sc_port_create = NULL;
1456 sc->sc_port_destroy = NULL;
1457
1458 return (0);
1459 }
1460
1461 static int
1462 lagg_fail_detach(struct lagg_softc *sc)
1463 {
1464 return (0);
1465 }
1466
1467 static int
1468 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1469 {
1470 struct lagg_port *lp;
1471
1472 /* Use the master port if active or the next available port */
1473 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1474 m_freem(m);
1475 return (ENOENT);
1476 }
1477
1478 /* Send mbuf */
1479 return (lagg_enqueue(lp->lp_ifp, m));
1480 }
1481
1482 static struct mbuf *
1483 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1484 {
1485 struct ifnet *ifp = sc->sc_ifp;
1486 struct lagg_port *tmp_tp;
1487
1488 if (lp == sc->sc_primary || lagg_failover_rx_all) {
1489 m->m_pkthdr.rcvif = ifp;
1490 return (m);
1491 }
1492
1493 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1494 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1495 /*
1496 		 * If tmp_tp is null, we've received a packet when all
1497 		 * our links are down. Weird, but process it anyway.
1498 */
1499 if ((tmp_tp == NULL || tmp_tp == lp)) {
1500 m->m_pkthdr.rcvif = ifp;
1501 return (m);
1502 }
1503 }
1504
1505 m_freem(m);
1506 return (NULL);
1507 }
1508
1509 /*
1510 * Loadbalancing
1511 */
1512
1513 static int
1514 lagg_lb_attach(struct lagg_softc *sc)
1515 {
1516 struct lagg_port *lp;
1517 struct lagg_lb *lb;
1518
1519 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1520 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1521 return (ENOMEM);
1522
1523 sc->sc_detach = lagg_lb_detach;
1524 sc->sc_start = lagg_lb_start;
1525 sc->sc_input = lagg_lb_input;
1526 sc->sc_port_create = lagg_lb_port_create;
1527 sc->sc_port_destroy = lagg_lb_port_destroy;
1528 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1529
1530 lb->lb_key = arc4random();
1531 sc->sc_psc = (caddr_t)lb;
1532
1533 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1534 lagg_lb_port_create(lp);
1535
1536 return (0);
1537 }
1538
1539 static int
1540 lagg_lb_detach(struct lagg_softc *sc)
1541 {
1542 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1543 if (lb != NULL)
1544 free(lb, M_DEVBUF);
1545 return (0);
1546 }
1547
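/*
 * Rebuild the flat table that maps hash buckets to ports; lp, if not
 * NULL, is a port being removed and is skipped.
 */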
1548 static int
1549 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1550 {
1551 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1552 struct lagg_port *lp_next;
1553 int i = 0;
1554
1555 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1556 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1557 if (lp_next == lp)
1558 continue;
1559 if (i >= LAGG_MAX_PORTS)
1560 return (EINVAL);
1561 if (sc->sc_ifflags & IFF_DEBUG)
1562 printf("%s: port %s at index %d\n",
1563 sc->sc_ifname, lp_next->lp_ifname, i);
1564 lb->lb_ports[i++] = lp_next;
1565 }
1566
1567 return (0);
1568 }
1569
1570 static int
1571 lagg_lb_port_create(struct lagg_port *lp)
1572 {
1573 struct lagg_softc *sc = lp->lp_softc;
1574 return (lagg_lb_porttable(sc, NULL));
1575 }
1576
1577 static void
1578 lagg_lb_port_destroy(struct lagg_port *lp)
1579 {
1580 struct lagg_softc *sc = lp->lp_softc;
1581 lagg_lb_porttable(sc, lp);
1582 }
1583
1584 static int
1585 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1586 {
1587 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1588 struct lagg_port *lp = NULL;
1589 uint32_t p = 0;
1590
1591 p = lagg_hashmbuf(m, lb->lb_key);
1592 p %= sc->sc_count;
1593 lp = lb->lb_ports[p];
1594
1595 /*
1596 * Check the port's link state. This will return the next active
1597 * port if the link is down or the port is NULL.
1598 */
1599 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1600 m_freem(m);
1601 return (ENOENT);
1602 }
1603
1604 /* Send mbuf */
1605 return (lagg_enqueue(lp->lp_ifp, m));
1606 }
1607
1608 static struct mbuf *
1609 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1610 {
1611 struct ifnet *ifp = sc->sc_ifp;
1612
1613 /* Just pass in the packet to our lagg device */
1614 m->m_pkthdr.rcvif = ifp;
1615
1616 return (m);
1617 }
1618
1619 /*
1620 * 802.3ad LACP
1621 */
1622
1623 static int
1624 lagg_lacp_attach(struct lagg_softc *sc)
1625 {
1626 struct lagg_port *lp;
1627 int error;
1628
1629 sc->sc_detach = lagg_lacp_detach;
1630 sc->sc_port_create = lacp_port_create;
1631 sc->sc_port_destroy = lacp_port_destroy;
1632 sc->sc_linkstate = lacp_linkstate;
1633 sc->sc_start = lagg_lacp_start;
1634 sc->sc_input = lagg_lacp_input;
1635 sc->sc_init = lacp_init;
1636 sc->sc_stop = lacp_stop;
1637 sc->sc_lladdr = lagg_lacp_lladdr;
1638 sc->sc_req = lacp_req;
1639 sc->sc_portreq = lacp_portreq;
1640
1641 error = lacp_attach(sc);
1642 if (error)
1643 return (error);
1644
1645 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1646 lacp_port_create(lp);
1647
1648 return (error);
1649 }
1650
1651 static int
1652 lagg_lacp_detach(struct lagg_softc *sc)
1653 {
1654 struct lagg_port *lp;
1655 int error;
1656
1657 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1658 lacp_port_destroy(lp);
1659
1660 /* unlocking is safe here */
1661 LAGG_WUNLOCK(sc);
1662 error = lacp_detach(sc);
1663 LAGG_WLOCK(sc);
1664
1665 return (error);
1666 }
1667
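/*
 * Called via sc_lladdr after the lagg MAC address has changed: tear
 * down and recreate every port's LACP state.
 */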
1668 static void
1669 lagg_lacp_lladdr(struct lagg_softc *sc)
1670 {
1671 struct lagg_port *lp;
1672
1673 /* purge all the lacp ports */
1674 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1675 lacp_port_destroy(lp);
1676
1677 /* add them back in */
1678 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1679 lacp_port_create(lp);
1680 }
1681
1682 static int
1683 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1684 {
1685 struct lagg_port *lp;
1686
1687 lp = lacp_select_tx_port(sc, m);
1688 if (lp == NULL) {
1689 m_freem(m);
1690 return (EBUSY);
1691 }
1692
1693 /* Send mbuf */
1694 return (lagg_enqueue(lp->lp_ifp, m));
1695 }
1696
1697 static struct mbuf *
1698 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1699 {
1700 struct ifnet *ifp = sc->sc_ifp;
1701 struct ether_header *eh;
1702 u_short etype;
1703
1704 eh = mtod(m, struct ether_header *);
1705 etype = ntohs(eh->ether_type);
1706
1707 /* Tap off LACP control messages */
1708 if (etype == ETHERTYPE_SLOW) {
1709 m = lacp_input(lp, m);
1710 if (m == NULL)
1711 return (NULL);
1712 }
1713
1714 /*
1715 * If the port is not collecting or not in the active aggregator then
1716 * free and return.
1717 */
1718 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1719 m_freem(m);
1720 return (NULL);
1721 }
1722
1723 m->m_pkthdr.rcvif = ifp;
1724 return (m);
1725 }