FreeBSD/Linux Kernel Cross Reference
sys/net/if_lagg.c
1 /* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD: releng/9.2/sys/net/if_lagg.c 253239 2013-07-12 01:52:31Z hrs $");
22
23 #include "opt_inet.h"
24 #include "opt_inet6.h"
25
26 #include <sys/param.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/queue.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/module.h>
35 #include <sys/priv.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <sys/hash.h>
39 #include <sys/lock.h>
40 #include <sys/rwlock.h>
41 #include <sys/taskqueue.h>
42 #include <sys/eventhandler.h>
43
44 #include <net/ethernet.h>
45 #include <net/if.h>
46 #include <net/if_clone.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_llc.h>
50 #include <net/if_media.h>
51 #include <net/if_types.h>
52 #include <net/if_var.h>
53 #include <net/bpf.h>
54
55 #if defined(INET) || defined(INET6)
56 #include <netinet/in.h>
57 #endif
58 #ifdef INET
59 #include <netinet/in_systm.h>
60 #include <netinet/if_ether.h>
61 #include <netinet/ip.h>
62 #endif
63
64 #ifdef INET6
65 #include <netinet/ip6.h>
66 #include <netinet6/in6_var.h>
67 #include <netinet6/in6_ifattach.h>
68 #endif
69
70 #include <net/if_vlan_var.h>
71 #include <net/if_lagg.h>
72 #include <net/ieee8023ad_lacp.h>
73
74 /* Special flags we should propagate to the lagg ports. */
75 static struct {
76 int flag;
77 int (*func)(struct ifnet *, int);
78 } lagg_pflags[] = {
79 {IFF_PROMISC, ifpromisc},
80 {IFF_ALLMULTI, if_allmulti},
81 {0, NULL}
82 };
83
84 SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
85 static struct mtx lagg_list_mtx;
86 eventhandler_tag lagg_detach_cookie = NULL;
87
88 static int lagg_clone_create(struct if_clone *, int, caddr_t);
89 static void lagg_clone_destroy(struct ifnet *);
90 static void lagg_lladdr(struct lagg_softc *, uint8_t *);
91 static void lagg_capabilities(struct lagg_softc *);
92 static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
93 static void lagg_port_setlladdr(void *, int);
94 static int lagg_port_create(struct lagg_softc *, struct ifnet *);
95 static int lagg_port_destroy(struct lagg_port *, int);
96 static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
97 static void lagg_linkstate(struct lagg_softc *);
98 static void lagg_port_state(struct ifnet *, int);
99 static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
100 static int lagg_port_output(struct ifnet *, struct mbuf *,
101 struct sockaddr *, struct route *);
102 static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
103 #ifdef LAGG_PORT_STACKING
104 static int lagg_port_checkstacking(struct lagg_softc *);
105 #endif
106 static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
107 static void lagg_init(void *);
108 static void lagg_stop(struct lagg_softc *);
109 static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
110 static int lagg_ether_setmulti(struct lagg_softc *);
111 static int lagg_ether_cmdmulti(struct lagg_port *, int);
112 static int lagg_setflag(struct lagg_port *, int, int,
113 int (*func)(struct ifnet *, int));
114 static int lagg_setflags(struct lagg_port *, int status);
115 static int lagg_transmit(struct ifnet *, struct mbuf *);
116 static void lagg_qflush(struct ifnet *);
117 static int lagg_media_change(struct ifnet *);
118 static void lagg_media_status(struct ifnet *, struct ifmediareq *);
119 static struct lagg_port *lagg_link_active(struct lagg_softc *,
120 struct lagg_port *);
121 static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
122
123 IFC_SIMPLE_DECLARE(lagg, 0);
124
125 /* Simple round robin */
126 static int lagg_rr_attach(struct lagg_softc *);
127 static int lagg_rr_detach(struct lagg_softc *);
128 static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
129 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
130 struct mbuf *);
131
132 /* Active failover */
133 static int lagg_fail_attach(struct lagg_softc *);
134 static int lagg_fail_detach(struct lagg_softc *);
135 static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
136 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
137 struct mbuf *);
138
139 /* Loadbalancing */
140 static int lagg_lb_attach(struct lagg_softc *);
141 static int lagg_lb_detach(struct lagg_softc *);
142 static int lagg_lb_port_create(struct lagg_port *);
143 static void lagg_lb_port_destroy(struct lagg_port *);
144 static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
145 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
146 struct mbuf *);
147 static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
148
149 /* 802.3ad LACP */
150 static int lagg_lacp_attach(struct lagg_softc *);
151 static int lagg_lacp_detach(struct lagg_softc *);
152 static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
153 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
154 struct mbuf *);
155 static void lagg_lacp_lladdr(struct lagg_softc *);
156
157 /* lagg protocol table */
158 static const struct {
159 int ti_proto;
160 int (*ti_attach)(struct lagg_softc *);
161 } lagg_protos[] = {
162 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
163 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
164 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
165 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
166 { LAGG_PROTO_LACP, lagg_lacp_attach },
167 { LAGG_PROTO_NONE, NULL }
168 };
169
170 SYSCTL_DECL(_net_link);
171 static SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0,
172 "Link Aggregation");
173
174 static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
175 SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
176 &lagg_failover_rx_all, 0,
177 "Accept input from any interface in a failover lagg");
178 static int def_use_flowid = 1; /* Default value for using M_FLOWID */
179 TUNABLE_INT("net.link.lagg.default_use_flowid", &def_use_flowid);
180 SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RW,
181 &def_use_flowid, 0,
182 "Default setting for using flow id for load sharing");
183
184 static int
185 lagg_modevent(module_t mod, int type, void *data)
186 {
187
188 switch (type) {
189 case MOD_LOAD:
190 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
191 SLIST_INIT(&lagg_list);
192 if_clone_attach(&lagg_cloner);
193 lagg_input_p = lagg_input;
194 lagg_linkstate_p = lagg_port_state;
195 lagg_detach_cookie = EVENTHANDLER_REGISTER(
196 ifnet_departure_event, lagg_port_ifdetach, NULL,
197 EVENTHANDLER_PRI_ANY);
198 break;
199 case MOD_UNLOAD:
200 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
201 lagg_detach_cookie);
202 if_clone_detach(&lagg_cloner);
203 lagg_input_p = NULL;
204 lagg_linkstate_p = NULL;
205 mtx_destroy(&lagg_list_mtx);
206 break;
207 default:
208 return (EOPNOTSUPP);
209 }
210 return (0);
211 }
212
213 static moduledata_t lagg_mod = {
214 "if_lagg",
215 lagg_modevent,
216 0
217 };
218
219 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
220 MODULE_VERSION(if_lagg, 1);
221
222 #if __FreeBSD_version >= 800000
223 /*
224 * This routine is run via a vlan
225 * config EVENT
226 */
227 static void
228 lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
229 {
230 struct lagg_softc *sc = ifp->if_softc;
231 struct lagg_port *lp;
232
233 if (ifp->if_softc != arg) /* Not our event */
234 return;
235
236 LAGG_RLOCK(sc);
237 if (!SLIST_EMPTY(&sc->sc_ports)) {
238 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
239 EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
240 }
241 LAGG_RUNLOCK(sc);
242 }
243
244 /*
245 * This routine is run via a vlan
246 * unconfig EVENT
247 */
248 static void
249 lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
250 {
251 struct lagg_softc *sc = ifp->if_softc;
252 struct lagg_port *lp;
253
254 if (ifp->if_softc != arg) /* Not our event */
255 return;
256
257 LAGG_RLOCK(sc);
258 if (!SLIST_EMPTY(&sc->sc_ports)) {
259 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
260 EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
261 }
262 LAGG_RUNLOCK(sc);
263 }
264 #endif
265
266 static int
267 lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
268 {
269 struct lagg_softc *sc;
270 struct ifnet *ifp;
271 int i, error = 0;
272 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
273 struct sysctl_oid *oid;
274 char num[14]; /* sufficient for 32 bits */
275
276 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
277 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
278 if (ifp == NULL) {
279 free(sc, M_DEVBUF);
280 return (ENOSPC);
281 }
282
283 sysctl_ctx_init(&sc->ctx);
284 snprintf(num, sizeof(num), "%u", unit);
285 sc->use_flowid = def_use_flowid;
286 oid = SYSCTL_ADD_NODE(&sc->ctx, &SYSCTL_NODE_CHILDREN(_net_link, lagg),
287 OID_AUTO, num, CTLFLAG_RD, NULL, "");
288 SYSCTL_ADD_INT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
289 "use_flowid", CTLTYPE_INT|CTLFLAG_RW, &sc->use_flowid, sc->use_flowid,
290 "Use flow id for load sharing");
291 SYSCTL_ADD_INT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
292 "count", CTLTYPE_INT|CTLFLAG_RD, &sc->sc_count, sc->sc_count,
293 "Total number of ports");
294 /* Hash all layers by default */
295 sc->sc_flags = LAGG_F_HASHL2|LAGG_F_HASHL3|LAGG_F_HASHL4;
296
297 sc->sc_proto = LAGG_PROTO_NONE;
298 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
299 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
300 sc->sc_proto = lagg_protos[i].ti_proto;
301 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
302 if_free_type(ifp, IFT_ETHER);
303 free(sc, M_DEVBUF);
304 return (error);
305 }
306 break;
307 }
308 }
309 LAGG_LOCK_INIT(sc);
310 SLIST_INIT(&sc->sc_ports);
311 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
312
313 /* Initialise pseudo media types */
314 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
315 lagg_media_status);
316 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
317 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
318
319 if_initname(ifp, ifc->ifc_name, unit);
320 ifp->if_type = IFT_ETHER;
321 ifp->if_softc = sc;
322 ifp->if_transmit = lagg_transmit;
323 ifp->if_qflush = lagg_qflush;
324 ifp->if_init = lagg_init;
325 ifp->if_ioctl = lagg_ioctl;
326 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
327
328 /*
329 * Attach as an ordinary ethernet device; children will be attached
330 * as special device IFT_IEEE8023ADLAG.
331 */
332 ether_ifattach(ifp, eaddr);
333
334 #if __FreeBSD_version >= 800000
335 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
336 lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
337 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
338 lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
339 #endif
340
341 /* Insert into the global list of laggs */
342 mtx_lock(&lagg_list_mtx);
343 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
344 mtx_unlock(&lagg_list_mtx);
345
346 return (0);
347 }
348
349 static void
350 lagg_clone_destroy(struct ifnet *ifp)
351 {
352 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
353 struct lagg_port *lp;
354
355 LAGG_WLOCK(sc);
356
357 lagg_stop(sc);
358 ifp->if_flags &= ~IFF_UP;
359
360 #if __FreeBSD_version >= 800000
361 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
362 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
363 #endif
364
365 /* Shutdown and remove lagg ports */
366 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
367 lagg_port_destroy(lp, 1);
368 /* Unhook the aggregation protocol */
369 if (sc->sc_detach != NULL)
370 (*sc->sc_detach)(sc);
371
372 LAGG_WUNLOCK(sc);
373
374 sysctl_ctx_free(&sc->ctx);
375 ifmedia_removeall(&sc->sc_media);
376 ether_ifdetach(ifp);
377 if_free_type(ifp, IFT_ETHER);
378
379 mtx_lock(&lagg_list_mtx);
380 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
381 mtx_unlock(&lagg_list_mtx);
382
383 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
384 LAGG_LOCK_DESTROY(sc);
385 free(sc, M_DEVBUF);
386 }
387
388 static void
389 lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
390 {
391 struct ifnet *ifp = sc->sc_ifp;
392
393 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
394 return;
395
396 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
397 /* Let the protocol know the MAC has changed */
398 if (sc->sc_lladdr != NULL)
399 (*sc->sc_lladdr)(sc);
400 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
401 }
402
403 static void
404 lagg_capabilities(struct lagg_softc *sc)
405 {
406 struct lagg_port *lp;
407 int cap = ~0, ena = ~0;
408 u_long hwa = ~0UL;
409
410 LAGG_WLOCK_ASSERT(sc);
411
412 /* Get capabilities from the lagg ports */
413 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
414 cap &= lp->lp_ifp->if_capabilities;
415 ena &= lp->lp_ifp->if_capenable;
416 hwa &= lp->lp_ifp->if_hwassist;
417 }
418 cap = (cap == ~0 ? 0 : cap);
419 ena = (ena == ~0 ? 0 : ena);
420 hwa = (hwa == ~0 ? 0 : hwa);
421
422 if (sc->sc_ifp->if_capabilities != cap ||
423 sc->sc_ifp->if_capenable != ena ||
424 sc->sc_ifp->if_hwassist != hwa) {
425 sc->sc_ifp->if_capabilities = cap;
426 sc->sc_ifp->if_capenable = ena;
427 sc->sc_ifp->if_hwassist = hwa;
428 getmicrotime(&sc->sc_ifp->if_lastchange);
429
430 if (sc->sc_ifflags & IFF_DEBUG)
431 if_printf(sc->sc_ifp,
432 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
433 }
434 }
435
436 static void
437 lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
438 {
439 struct lagg_softc *sc = lp->lp_softc;
440 struct ifnet *ifp = lp->lp_ifp;
441 struct lagg_llq *llq;
442 int pending = 0;
443
444 LAGG_WLOCK_ASSERT(sc);
445
446 if (lp->lp_detaching ||
447 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
448 return;
449
450 /* Check to make sure it's not already queued to be changed */
451 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
452 if (llq->llq_ifp == ifp) {
453 pending = 1;
454 break;
455 }
456 }
457
458 if (!pending) {
459 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
460 if (llq == NULL) /* XXX what to do */
461 return;
462 }
463
464 /* Update the lladdr even if pending, it may have changed */
465 llq->llq_ifp = ifp;
466 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
467
468 if (!pending)
469 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
470
471 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
472 }
473
474 /*
475 * Set the interface MAC address from a taskqueue to avoid a LOR.
476 */
477 static void
478 lagg_port_setlladdr(void *arg, int pending)
479 {
480 struct lagg_softc *sc = (struct lagg_softc *)arg;
481 struct lagg_llq *llq, *head;
482 struct ifnet *ifp;
483 int error;
484
485 /* Grab a local reference to the queue and remove it from the softc */
486 LAGG_WLOCK(sc);
487 head = SLIST_FIRST(&sc->sc_llq_head);
488 SLIST_FIRST(&sc->sc_llq_head) = NULL;
489 LAGG_WUNLOCK(sc);
490
491 /*
492 * Traverse the queue and set the lladdr on each ifp. It is safe to
493 * do this unlocked as we have the only reference to it.
494 */
495 for (llq = head; llq != NULL; llq = head) {
496 ifp = llq->llq_ifp;
497
498 /* Set the link layer address */
499 CURVNET_SET(ifp->if_vnet);
500 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
501 CURVNET_RESTORE();
502 if (error)
503 printf("%s: setlladdr failed on %s\n", __func__,
504 ifp->if_xname);
505
506 head = SLIST_NEXT(llq, llq_entries);
507 free(llq, M_DEVBUF);
508 }
509 }
510
511 static int
512 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
513 {
514 struct lagg_softc *sc_ptr;
515 struct lagg_port *lp;
516 int error = 0;
517
518 LAGG_WLOCK_ASSERT(sc);
519
520 /* Limit the maximal number of lagg ports */
521 if (sc->sc_count >= LAGG_MAX_PORTS)
522 return (ENOSPC);
523
524 /* Check if port has already been associated to a lagg */
525 if (ifp->if_lagg != NULL) {
526 /* Port is already in the current lagg? */
527 lp = (struct lagg_port *)ifp->if_lagg;
528 if (lp->lp_softc == sc)
529 return (EEXIST);
530 return (EBUSY);
531 }
532
533 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
534 if (ifp->if_type != IFT_ETHER)
535 return (EPROTONOSUPPORT);
536
537 #ifdef INET6
538 /*
539 * The member interface should not have an inet6 address because
540 * two interfaces with a valid link-local scope zone must not be
541 * merged in any form.  This restriction is needed to prevent a
542 * violation of the link-local scope zone.  Attempts to add a
543 * member interface which has inet6 addresses trigger removal of
544 * all inet6 addresses on the member interface.
545 */
546 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
547 if (in6ifa_llaonifp(lp->lp_ifp)) {
548 in6_ifdetach(lp->lp_ifp);
549 if_printf(sc->sc_ifp,
550 "IPv6 addresses on %s have been removed "
551 "before adding it as a member to prevent "
552 "IPv6 address scope violation.\n",
553 lp->lp_ifp->if_xname);
554 }
555 }
556 if (in6ifa_llaonifp(ifp)) {
557 in6_ifdetach(ifp);
558 if_printf(sc->sc_ifp,
559 "IPv6 addresses on %s have been removed "
560 "before adding it as a member to prevent "
561 "IPv6 address scope violation.\n",
562 ifp->if_xname);
563 }
564 #endif
565 /* Allow the first Ethernet member to define the MTU */
566 if (SLIST_EMPTY(&sc->sc_ports))
567 sc->sc_ifp->if_mtu = ifp->if_mtu;
568 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
569 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
570 ifp->if_xname);
571 return (EINVAL);
572 }
573
574 if ((lp = malloc(sizeof(struct lagg_port),
575 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
576 return (ENOMEM);
577
578 /* Check if port is a stacked lagg */
579 mtx_lock(&lagg_list_mtx);
580 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
581 if (ifp == sc_ptr->sc_ifp) {
582 mtx_unlock(&lagg_list_mtx);
583 free(lp, M_DEVBUF);
584 return (EINVAL);
585 /* XXX disable stacking for the moment, it's untested */
586 #ifdef LAGG_PORT_STACKING
587 lp->lp_flags |= LAGG_PORT_STACK;
588 if (lagg_port_checkstacking(sc_ptr) >=
589 LAGG_MAX_STACKING) {
590 mtx_unlock(&lagg_list_mtx);
591 free(lp, M_DEVBUF);
592 return (E2BIG);
593 }
594 #endif
595 }
596 }
597 mtx_unlock(&lagg_list_mtx);
598
599 /* Change the interface type */
600 lp->lp_iftype = ifp->if_type;
601 ifp->if_type = IFT_IEEE8023ADLAG;
602 ifp->if_lagg = lp;
603 lp->lp_ioctl = ifp->if_ioctl;
604 ifp->if_ioctl = lagg_port_ioctl;
605 lp->lp_output = ifp->if_output;
606 ifp->if_output = lagg_port_output;
607
608 lp->lp_ifp = ifp;
609 lp->lp_softc = sc;
610
611 /* Save port link layer address */
612 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
613
614 if (SLIST_EMPTY(&sc->sc_ports)) {
615 sc->sc_primary = lp;
616 lagg_lladdr(sc, IF_LLADDR(ifp));
617 } else {
618 /* Update link layer address for this port */
619 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
620 }
621
622 /* Insert into the list of ports */
623 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
624 sc->sc_count++;
625
626 /* Update lagg capabilities */
627 lagg_capabilities(sc);
628 lagg_linkstate(sc);
629
630 /* Add multicast addresses and interface flags to this port */
631 lagg_ether_cmdmulti(lp, 1);
632 lagg_setflags(lp, 1);
633
634 if (sc->sc_port_create != NULL)
635 error = (*sc->sc_port_create)(lp);
636 if (error) {
637 /* remove the port again, without calling sc_port_destroy */
638 lagg_port_destroy(lp, 0);
639 return (error);
640 }
641
642 return (error);
643 }
644
645 #ifdef LAGG_PORT_STACKING
646 static int
647 lagg_port_checkstacking(struct lagg_softc *sc)
648 {
649 struct lagg_softc *sc_ptr;
650 struct lagg_port *lp;
651 int m = 0;
652
653 LAGG_WLOCK_ASSERT(sc);
654
655 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
656 if (lp->lp_flags & LAGG_PORT_STACK) {
657 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
658 m = MAX(m, lagg_port_checkstacking(sc_ptr));
659 }
660 }
661
662 return (m + 1);
663 }
664 #endif
665
666 static int
667 lagg_port_destroy(struct lagg_port *lp, int runpd)
668 {
669 struct lagg_softc *sc = lp->lp_softc;
670 struct lagg_port *lp_ptr;
671 struct lagg_llq *llq;
672 struct ifnet *ifp = lp->lp_ifp;
673
674 LAGG_WLOCK_ASSERT(sc);
675
676 if (runpd && sc->sc_port_destroy != NULL)
677 (*sc->sc_port_destroy)(lp);
678
679 /*
680 * Remove multicast addresses and interface flags from this port and
681 * reset the MAC address, skip if the interface is being detached.
682 */
683 if (!lp->lp_detaching) {
684 lagg_ether_cmdmulti(lp, 0);
685 lagg_setflags(lp, 0);
686 lagg_port_lladdr(lp, lp->lp_lladdr);
687 }
688
689 /* Restore interface */
690 ifp->if_type = lp->lp_iftype;
691 ifp->if_ioctl = lp->lp_ioctl;
692 ifp->if_output = lp->lp_output;
693 ifp->if_lagg = NULL;
694
695 /* Finally, remove the port from the lagg */
696 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
697 sc->sc_count--;
698
699 /* Update the primary interface */
700 if (lp == sc->sc_primary) {
701 uint8_t lladdr[ETHER_ADDR_LEN];
702
703 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
704 bzero(&lladdr, ETHER_ADDR_LEN);
705 } else {
706 bcopy(lp_ptr->lp_lladdr,
707 lladdr, ETHER_ADDR_LEN);
708 }
709 lagg_lladdr(sc, lladdr);
710 sc->sc_primary = lp_ptr;
711
712 /* Update link layer address for each port */
713 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
714 lagg_port_lladdr(lp_ptr, lladdr);
715 }
716
717 /* Remove any pending lladdr changes from the queue */
718 if (lp->lp_detaching) {
719 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
720 if (llq->llq_ifp == ifp) {
721 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
722 llq_entries);
723 free(llq, M_DEVBUF);
724 break; /* Only appears once */
725 }
726 }
727 }
728
729 if (lp->lp_ifflags)
730 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
731
732 free(lp, M_DEVBUF);
733
734 /* Update lagg capabilities */
735 lagg_capabilities(sc);
736 lagg_linkstate(sc);
737
738 return (0);
739 }
740
741 static int
742 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
743 {
744 struct lagg_reqport *rp = (struct lagg_reqport *)data;
745 struct lagg_softc *sc;
746 struct lagg_port *lp = NULL;
747 int error = 0;
748
749 /* Should be checked by the caller */
750 if (ifp->if_type != IFT_IEEE8023ADLAG ||
751 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
752 goto fallback;
753
754 switch (cmd) {
755 case SIOCGLAGGPORT:
756 if (rp->rp_portname[0] == '\0' ||
757 ifunit(rp->rp_portname) != ifp) {
758 error = EINVAL;
759 break;
760 }
761
762 LAGG_RLOCK(sc);
763 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
764 error = ENOENT;
765 LAGG_RUNLOCK(sc);
766 break;
767 }
768
769 lagg_port2req(lp, rp);
770 LAGG_RUNLOCK(sc);
771 break;
772
773 case SIOCSIFCAP:
774 if (lp->lp_ioctl == NULL) {
775 error = EINVAL;
776 break;
777 }
778 error = (*lp->lp_ioctl)(ifp, cmd, data);
779 if (error)
780 break;
781
782 /* Update lagg interface capabilities */
783 LAGG_WLOCK(sc);
784 lagg_capabilities(sc);
785 LAGG_WUNLOCK(sc);
786 break;
787
788 case SIOCSIFMTU:
789 /* Do not allow the MTU to be changed once joined */
790 error = EINVAL;
791 break;
792
793 default:
794 goto fallback;
795 }
796
797 return (error);
798
799 fallback:
800 if (lp != NULL && lp->lp_ioctl != NULL)
801 return ((*lp->lp_ioctl)(ifp, cmd, data));
802
803 return (EINVAL);
804 }
805
806 /*
807 * For direct output to child ports.
808 */
809 static int
810 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
811 struct sockaddr *dst, struct route *ro)
812 {
813 struct lagg_port *lp = ifp->if_lagg;
814
815 switch (dst->sa_family) {
816 case pseudo_AF_HDRCMPLT:
817 case AF_UNSPEC:
818 return ((*lp->lp_output)(ifp, m, dst, ro));
819 }
820
821 /* drop any other frames */
822 m_freem(m);
823 return (EBUSY);
824 }
825
826 static void
827 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
828 {
829 struct lagg_port *lp;
830 struct lagg_softc *sc;
831
832 if ((lp = ifp->if_lagg) == NULL)
833 return;
834 /* If the ifnet is just being renamed, don't do anything. */
835 if (ifp->if_flags & IFF_RENAMING)
836 return;
837
838 sc = lp->lp_softc;
839
840 LAGG_WLOCK(sc);
841 lp->lp_detaching = 1;
842 lagg_port_destroy(lp, 1);
843 LAGG_WUNLOCK(sc);
844 }
845
846 static void
847 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
848 {
849 struct lagg_softc *sc = lp->lp_softc;
850
851 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
852 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
853 rp->rp_prio = lp->lp_prio;
854 rp->rp_flags = lp->lp_flags;
855 if (sc->sc_portreq != NULL)
856 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
857
858 /* Add protocol specific flags */
859 switch (sc->sc_proto) {
860 case LAGG_PROTO_FAILOVER:
861 if (lp == sc->sc_primary)
862 rp->rp_flags |= LAGG_PORT_MASTER;
863 if (lp == lagg_link_active(sc, sc->sc_primary))
864 rp->rp_flags |= LAGG_PORT_ACTIVE;
865 break;
866
867 case LAGG_PROTO_ROUNDROBIN:
868 case LAGG_PROTO_LOADBALANCE:
869 case LAGG_PROTO_ETHERCHANNEL:
870 if (LAGG_PORTACTIVE(lp))
871 rp->rp_flags |= LAGG_PORT_ACTIVE;
872 break;
873
874 case LAGG_PROTO_LACP:
875 /* LACP has a different definition of active */
876 if (lacp_isactive(lp))
877 rp->rp_flags |= LAGG_PORT_ACTIVE;
878 if (lacp_iscollecting(lp))
879 rp->rp_flags |= LAGG_PORT_COLLECTING;
880 if (lacp_isdistributing(lp))
881 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
882 break;
883 }
884
885 }
886
887 static void
888 lagg_init(void *xsc)
889 {
890 struct lagg_softc *sc = (struct lagg_softc *)xsc;
891 struct lagg_port *lp;
892 struct ifnet *ifp = sc->sc_ifp;
893
894 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
895 return;
896
897 LAGG_WLOCK(sc);
898
899 ifp->if_drv_flags |= IFF_DRV_RUNNING;
900 /* Update the port lladdrs */
901 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
902 lagg_port_lladdr(lp, IF_LLADDR(ifp));
903
904 if (sc->sc_init != NULL)
905 (*sc->sc_init)(sc);
906
907 LAGG_WUNLOCK(sc);
908 }
909
910 static void
911 lagg_stop(struct lagg_softc *sc)
912 {
913 struct ifnet *ifp = sc->sc_ifp;
914
915 LAGG_WLOCK_ASSERT(sc);
916
917 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
918 return;
919
920 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
921
922 if (sc->sc_stop != NULL)
923 (*sc->sc_stop)(sc);
924 }
925
926 static int
927 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
928 {
929 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
930 struct lagg_reqall *ra = (struct lagg_reqall *)data;
931 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
932 struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
933 struct ifreq *ifr = (struct ifreq *)data;
934 struct lagg_port *lp;
935 struct ifnet *tpif;
936 struct thread *td = curthread;
937 char *buf, *outbuf;
938 int count, buflen, len, error = 0;
939
940 bzero(&rpbuf, sizeof(rpbuf));
941
942 switch (cmd) {
943 case SIOCGLAGG:
944 LAGG_RLOCK(sc);
945 count = 0;
946 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
947 count++;
948 buflen = count * sizeof(struct lagg_reqport);
949 LAGG_RUNLOCK(sc);
950
951 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
952
953 LAGG_RLOCK(sc);
954 ra->ra_proto = sc->sc_proto;
955 if (sc->sc_req != NULL)
956 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
957
958 count = 0;
959 buf = outbuf;
960 len = min(ra->ra_size, buflen);
961 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
962 if (len < sizeof(rpbuf))
963 break;
964
965 lagg_port2req(lp, &rpbuf);
966 memcpy(buf, &rpbuf, sizeof(rpbuf));
967 count++;
968 buf += sizeof(rpbuf);
969 len -= sizeof(rpbuf);
970 }
971 LAGG_RUNLOCK(sc);
972 ra->ra_ports = count;
973 ra->ra_size = count * sizeof(rpbuf);
974 error = copyout(outbuf, ra->ra_port, ra->ra_size);
975 free(outbuf, M_TEMP);
976 break;
977 case SIOCSLAGG:
978 error = priv_check(td, PRIV_NET_LAGG);
979 if (error)
980 break;
981 if (ra->ra_proto >= LAGG_PROTO_MAX) {
982 error = EPROTONOSUPPORT;
983 break;
984 }
985 LAGG_WLOCK(sc);
986 if (sc->sc_proto != LAGG_PROTO_NONE) {
987 /* Reset protocol first in case detach unlocks */
988 sc->sc_proto = LAGG_PROTO_NONE;
989 error = sc->sc_detach(sc);
990 sc->sc_detach = NULL;
991 sc->sc_start = NULL;
992 sc->sc_input = NULL;
993 sc->sc_port_create = NULL;
994 sc->sc_port_destroy = NULL;
995 sc->sc_linkstate = NULL;
996 sc->sc_init = NULL;
997 sc->sc_stop = NULL;
998 sc->sc_lladdr = NULL;
999 sc->sc_req = NULL;
1000 sc->sc_portreq = NULL;
1001 } else if (sc->sc_input != NULL) {
1002 /* Still detaching */
1003 error = EBUSY;
1004 }
1005 if (error != 0) {
1006 LAGG_WUNLOCK(sc);
1007 break;
1008 }
1009 for (int i = 0; i < (sizeof(lagg_protos) /
1010 sizeof(lagg_protos[0])); i++) {
1011 if (lagg_protos[i].ti_proto == ra->ra_proto) {
1012 if (sc->sc_ifflags & IFF_DEBUG)
1013 printf("%s: using proto %u\n",
1014 sc->sc_ifname,
1015 lagg_protos[i].ti_proto);
1016 sc->sc_proto = lagg_protos[i].ti_proto;
1017 if (sc->sc_proto != LAGG_PROTO_NONE)
1018 error = lagg_protos[i].ti_attach(sc);
1019 LAGG_WUNLOCK(sc);
1020 return (error);
1021 }
1022 }
1023 LAGG_WUNLOCK(sc);
1024 error = EPROTONOSUPPORT;
1025 break;
1026 case SIOCGLAGGFLAGS:
1027 rf->rf_flags = sc->sc_flags;
1028 break;
1029 case SIOCSLAGGHASH:
1030 error = priv_check(td, PRIV_NET_LAGG);
1031 if (error)
1032 break;
1033 if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
1034 error = EINVAL;
1035 break;
1036 }
1037 LAGG_WLOCK(sc);
1038 sc->sc_flags &= ~LAGG_F_HASHMASK;
1039 sc->sc_flags |= rf->rf_flags & LAGG_F_HASHMASK;
1040 LAGG_WUNLOCK(sc);
1041 break;
1042 case SIOCGLAGGPORT:
1043 if (rp->rp_portname[0] == '\0' ||
1044 (tpif = ifunit(rp->rp_portname)) == NULL) {
1045 error = EINVAL;
1046 break;
1047 }
1048
1049 LAGG_RLOCK(sc);
1050 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1051 lp->lp_softc != sc) {
1052 error = ENOENT;
1053 LAGG_RUNLOCK(sc);
1054 break;
1055 }
1056
1057 lagg_port2req(lp, rp);
1058 LAGG_RUNLOCK(sc);
1059 break;
1060 case SIOCSLAGGPORT:
1061 error = priv_check(td, PRIV_NET_LAGG);
1062 if (error)
1063 break;
1064 if (rp->rp_portname[0] == '\0' ||
1065 (tpif = ifunit(rp->rp_portname)) == NULL) {
1066 error = EINVAL;
1067 break;
1068 }
1069 LAGG_WLOCK(sc);
1070 error = lagg_port_create(sc, tpif);
1071 LAGG_WUNLOCK(sc);
1072 break;
1073 case SIOCSLAGGDELPORT:
1074 error = priv_check(td, PRIV_NET_LAGG);
1075 if (error)
1076 break;
1077 if (rp->rp_portname[0] == '\0' ||
1078 (tpif = ifunit(rp->rp_portname)) == NULL) {
1079 error = EINVAL;
1080 break;
1081 }
1082
1083 LAGG_WLOCK(sc);
1084 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1085 lp->lp_softc != sc) {
1086 error = ENOENT;
1087 LAGG_WUNLOCK(sc);
1088 break;
1089 }
1090
1091 error = lagg_port_destroy(lp, 1);
1092 LAGG_WUNLOCK(sc);
1093 break;
1094 case SIOCSIFFLAGS:
1095 /* Set flags on ports too */
1096 LAGG_WLOCK(sc);
1097 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1098 lagg_setflags(lp, 1);
1099 }
1100 LAGG_WUNLOCK(sc);
1101
1102 if (!(ifp->if_flags & IFF_UP) &&
1103 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1104 /*
1105 * If interface is marked down and it is running,
1106 * then stop and disable it.
1107 */
1108 LAGG_WLOCK(sc);
1109 lagg_stop(sc);
1110 LAGG_WUNLOCK(sc);
1111 } else if ((ifp->if_flags & IFF_UP) &&
1112 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1113 /*
1114 * If interface is marked up and it is stopped, then
1115 * start it.
1116 */
1117 (*ifp->if_init)(sc);
1118 }
1119 break;
1120 case SIOCADDMULTI:
1121 case SIOCDELMULTI:
1122 LAGG_WLOCK(sc);
1123 error = lagg_ether_setmulti(sc);
1124 LAGG_WUNLOCK(sc);
1125 break;
1126 case SIOCSIFMEDIA:
1127 case SIOCGIFMEDIA:
1128 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1129 break;
1130
1131 case SIOCSIFCAP:
1132 case SIOCSIFMTU:
1133 /* Do not allow the MTU or caps to be directly changed */
1134 error = EINVAL;
1135 break;
1136
1137 default:
1138 error = ether_ioctl(ifp, cmd, data);
1139 break;
1140 }
1141 return (error);
1142 }
1143
1144 static int
1145 lagg_ether_setmulti(struct lagg_softc *sc)
1146 {
1147 struct lagg_port *lp;
1148
1149 LAGG_WLOCK_ASSERT(sc);
1150
1151 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1152 /* First, remove any existing filter entries. */
1153 lagg_ether_cmdmulti(lp, 0);
1154 /* copy all addresses from the lagg interface to the port */
1155 lagg_ether_cmdmulti(lp, 1);
1156 }
1157 return (0);
1158 }
1159
1160 static int
1161 lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1162 {
1163 struct lagg_softc *sc = lp->lp_softc;
1164 struct ifnet *ifp = lp->lp_ifp;
1165 struct ifnet *scifp = sc->sc_ifp;
1166 struct lagg_mc *mc;
1167 struct ifmultiaddr *ifma, *rifma = NULL;
1168 struct sockaddr_dl sdl;
1169 int error;
1170
1171 LAGG_WLOCK_ASSERT(sc);
1172
1173 bzero((char *)&sdl, sizeof(sdl));
1174 sdl.sdl_len = sizeof(sdl);
1175 sdl.sdl_family = AF_LINK;
1176 sdl.sdl_type = IFT_ETHER;
1177 sdl.sdl_alen = ETHER_ADDR_LEN;
1178 sdl.sdl_index = ifp->if_index;
1179
1180 if (set) {
1181 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1182 if (ifma->ifma_addr->sa_family != AF_LINK)
1183 continue;
1184 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1185 LLADDR(&sdl), ETHER_ADDR_LEN);
1186
1187 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1188 if (error)
1189 return (error);
1190 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1191 if (mc == NULL)
1192 return (ENOMEM);
1193 mc->mc_ifma = rifma;
1194 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1195 }
1196 } else {
1197 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1198 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1199 if_delmulti_ifma(mc->mc_ifma);
1200 free(mc, M_DEVBUF);
1201 }
1202 }
1203 return (0);
1204 }
1205
1206 /* Handle a ref counted flag that should be set on the lagg port as well */
1207 static int
1208 lagg_setflag(struct lagg_port *lp, int flag, int status,
1209 int (*func)(struct ifnet *, int))
1210 {
1211 struct lagg_softc *sc = lp->lp_softc;
1212 struct ifnet *scifp = sc->sc_ifp;
1213 struct ifnet *ifp = lp->lp_ifp;
1214 int error;
1215
1216 LAGG_WLOCK_ASSERT(sc);
1217
1218 status = status ? (scifp->if_flags & flag) : 0;
1219 /* Now "status" contains the flag value or 0 */
1220
1221 /*
1222 * See if the recorded port status differs from what we want
1223 * it to be.  If it does, flip it.  We record the port status
1224 * in lp_ifflags so that we won't clear a port flag we haven't
1225 * set.  In fact, we don't clear or set port flags directly;
1226 * we get or release references to them instead.  That's why
1227 * we can be sure that the recorded flags are still in accord
1228 * with the actual port flags.
1229 */
1230 if (status != (lp->lp_ifflags & flag)) {
1231 error = (*func)(ifp, status);
1232 if (error)
1233 return (error);
1234 lp->lp_ifflags &= ~flag;
1235 lp->lp_ifflags |= status;
1236 }
1237 return (0);
1238 }
1239
1240 /*
1241 * Handle IFF_* flags that require certain changes on the lagg port:
1242 * if "status" is true, update the port flags to match the lagg;
1243 * if "status" is false, forcibly clear the flags set on the port.
1244 */
1245 static int
1246 lagg_setflags(struct lagg_port *lp, int status)
1247 {
1248 int error, i;
1249
1250 for (i = 0; lagg_pflags[i].flag; i++) {
1251 error = lagg_setflag(lp, lagg_pflags[i].flag,
1252 status, lagg_pflags[i].func);
1253 if (error)
1254 return (error);
1255 }
1256 return (0);
1257 }
1258
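/*
 * Transmit path: the lagg interface's if_transmit hook.  The frame is
 * handed to the active protocol's start routine, which picks a member
 * port and queues the frame on it via lagg_enqueue().
 */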
1259 static int
1260 lagg_transmit(struct ifnet *ifp, struct mbuf *m)
1261 {
1262 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1263 int error, len, mcast;
1264
1265 len = m->m_pkthdr.len;
1266 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
1267
1268 LAGG_RLOCK(sc);
1269 /* We need a Tx algorithm and at least one port */
1270 if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
1271 LAGG_RUNLOCK(sc);
1272 m_freem(m);
1273 ifp->if_oerrors++;
1274 return (ENXIO);
1275 }
1276
1277 ETHER_BPF_MTAP(ifp, m);
1278
1279 error = (*sc->sc_start)(sc, m);
1280 LAGG_RUNLOCK(sc);
1281
1282 if (error == 0) {
1283 ifp->if_opackets++;
1284 ifp->if_omcasts += mcast;
1285 ifp->if_obytes += len;
1286 } else
1287 ifp->if_oerrors++;
1288
1289 return (error);
1290 }
1291
1292 /*
1293 * The ifp->if_qflush entry point for lagg(4) is a no-op.
1294 */
1295 static void
1296 lagg_qflush(struct ifnet *ifp __unused)
1297 {
1298 }
1299
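/*
 * Receive path: the ethernet input path hands frames arriving on a
 * member port to this hook (installed through lagg_input_p at module
 * load).  The frame is passed to the active protocol's input routine;
 * frames it accepts are re-parented to the lagg interface before being
 * returned to the stack.
 */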
1300 static struct mbuf *
1301 lagg_input(struct ifnet *ifp, struct mbuf *m)
1302 {
1303 struct lagg_port *lp = ifp->if_lagg;
1304 struct lagg_softc *sc = lp->lp_softc;
1305 struct ifnet *scifp = sc->sc_ifp;
1306
1307 LAGG_RLOCK(sc);
1308 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1309 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1310 sc->sc_proto == LAGG_PROTO_NONE) {
1311 LAGG_RUNLOCK(sc);
1312 m_freem(m);
1313 return (NULL);
1314 }
1315
1316 ETHER_BPF_MTAP(scifp, m);
1317
1318 m = (*sc->sc_input)(sc, lp, m);
1319
1320 if (m != NULL) {
1321 scifp->if_ipackets++;
1322 scifp->if_ibytes += m->m_pkthdr.len;
1323
1324 if (scifp->if_flags & IFF_MONITOR) {
1325 m_freem(m);
1326 m = NULL;
1327 }
1328 }
1329
1330 LAGG_RUNLOCK(sc);
1331 return (m);
1332 }
1333
1334 static int
1335 lagg_media_change(struct ifnet *ifp)
1336 {
1337 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1338
1339 if (sc->sc_ifflags & IFF_DEBUG)
1340 printf("%s\n", __func__);
1341
1342 /* Ignore */
1343 return (0);
1344 }
1345
1346 static void
1347 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1348 {
1349 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1350 struct lagg_port *lp;
1351
1352 imr->ifm_status = IFM_AVALID;
1353 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1354
1355 LAGG_RLOCK(sc);
1356 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1357 if (LAGG_PORTACTIVE(lp))
1358 imr->ifm_status |= IFM_ACTIVE;
1359 }
1360 LAGG_RUNLOCK(sc);
1361 }
1362
1363 static void
1364 lagg_linkstate(struct lagg_softc *sc)
1365 {
1366 struct lagg_port *lp;
1367 int new_link = LINK_STATE_DOWN;
1368 uint64_t speed;
1369
1370 /* Our link is considered up if at least one of our ports is active */
1371 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1372 if (lp->lp_link_state == LINK_STATE_UP) {
1373 new_link = LINK_STATE_UP;
1374 break;
1375 }
1376 }
1377 if_link_state_change(sc->sc_ifp, new_link);
1378
1379 /* Update if_baudrate to reflect the max possible speed */
1380 switch (sc->sc_proto) {
1381 case LAGG_PROTO_FAILOVER:
1382 sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
1383 sc->sc_primary->lp_ifp->if_baudrate : 0;
1384 break;
1385 case LAGG_PROTO_ROUNDROBIN:
1386 case LAGG_PROTO_LOADBALANCE:
1387 case LAGG_PROTO_ETHERCHANNEL:
1388 speed = 0;
1389 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1390 speed += lp->lp_ifp->if_baudrate;
1391 sc->sc_ifp->if_baudrate = speed;
1392 break;
1393 case LAGG_PROTO_LACP:
1394 /* LACP updates if_baudrate itself */
1395 break;
1396 }
1397 }
1398
1399 static void
1400 lagg_port_state(struct ifnet *ifp, int state)
1401 {
1402 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1403 struct lagg_softc *sc = NULL;
1404
1405 if (lp != NULL)
1406 sc = lp->lp_softc;
1407 if (sc == NULL)
1408 return;
1409
1410 LAGG_WLOCK(sc);
1411 lagg_linkstate(sc);
1412 if (sc->sc_linkstate != NULL)
1413 (*sc->sc_linkstate)(lp);
1414 LAGG_WUNLOCK(sc);
1415 }
1416
1417 struct lagg_port *
1418 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1419 {
1420 struct lagg_port *lp_next, *rval = NULL;
1421 // int new_link = LINK_STATE_DOWN;
1422
1423 LAGG_RLOCK_ASSERT(sc);
1424 /*
1425 * Search a port which reports an active link state.
1426 */
1427
1428 if (lp == NULL)
1429 goto search;
1430 if (LAGG_PORTACTIVE(lp)) {
1431 rval = lp;
1432 goto found;
1433 }
1434 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1435 LAGG_PORTACTIVE(lp_next)) {
1436 rval = lp_next;
1437 goto found;
1438 }
1439
1440 search:
1441 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1442 if (LAGG_PORTACTIVE(lp_next)) {
1443 rval = lp_next;
1444 goto found;
1445 }
1446 }
1447
1448 found:
1449 if (rval != NULL) {
1450 /*
1451 * The IEEE 802.1D standard assumes that a lagg with
1452 * multiple ports is always full duplex. This is valid
1453 * for load sharing laggs and if at least two links
1454 * are active. Unfortunately, checking the latter would
1455 * be too expensive at this point.
1456 XXX
1457 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1458 (sc->sc_count > 1))
1459 new_link = LINK_STATE_FULL_DUPLEX;
1460 else
1461 new_link = rval->lp_link_state;
1462 */
1463 }
1464
1465 return (rval);
1466 }
1467
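/*
 * Return a pointer to "len" contiguous bytes at offset "off" into the
 * packet, copying them into the caller-supplied "buf" when the first
 * mbuf does not hold them contiguously, or NULL if the packet itself
 * is too short.
 */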
1468 static const void *
1469 lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1470 {
1471 if (m->m_pkthdr.len < (off + len)) {
1472 return (NULL);
1473 } else if (m->m_len < (off + len)) {
1474 m_copydata(m, off, len, buf);
1475 return (buf);
1476 }
1477 return (mtod(m, char *) + off);
1478 }
1479
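/*
 * Compute a 32-bit flow hash over the frame.  Depending on the
 * LAGG_F_HASHL2/L3/L4 flags this folds in the Ethernet addresses and
 * VLAN tag, the IPv4/IPv6 addresses (plus the IPv6 flow label) and the
 * TCP/UDP/SCTP port numbers.  The load-balance protocol below (and the
 * LACP code) use the result to select an egress port for the flow.
 */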
1480 uint32_t
1481 lagg_hashmbuf(struct lagg_softc *sc, struct mbuf *m, uint32_t key)
1482 {
1483 uint16_t etype;
1484 uint32_t p = key;
1485 int off;
1486 struct ether_header *eh;
1487 const struct ether_vlan_header *vlan;
1488 #ifdef INET
1489 const struct ip *ip;
1490 const uint32_t *ports;
1491 int iphlen;
1492 #endif
1493 #ifdef INET6
1494 const struct ip6_hdr *ip6;
1495 uint32_t flow;
1496 #endif
1497 union {
1498 #ifdef INET
1499 struct ip ip;
1500 #endif
1501 #ifdef INET6
1502 struct ip6_hdr ip6;
1503 #endif
1504 struct ether_vlan_header vlan;
1505 uint32_t port;
1506 } buf;
1507
1508
1509 off = sizeof(*eh);
1510 if (m->m_len < off)
1511 goto out;
1512 eh = mtod(m, struct ether_header *);
1513 etype = ntohs(eh->ether_type);
1514 if (sc->sc_flags & LAGG_F_HASHL2) {
1515 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p);
1516 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1517 }
1518
1519 /* Special handling for encapsulating VLAN frames */
1520 if ((m->m_flags & M_VLANTAG) && (sc->sc_flags & LAGG_F_HASHL2)) {
1521 p = hash32_buf(&m->m_pkthdr.ether_vtag,
1522 sizeof(m->m_pkthdr.ether_vtag), p);
1523 } else if (etype == ETHERTYPE_VLAN) {
1524 vlan = lagg_gethdr(m, off, sizeof(*vlan), &buf);
1525 if (vlan == NULL)
1526 goto out;
1527
1528 if (sc->sc_flags & LAGG_F_HASHL2)
1529 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1530 etype = ntohs(vlan->evl_proto);
1531 off += sizeof(*vlan) - sizeof(*eh);
1532 }
1533
1534 switch (etype) {
1535 #ifdef INET
1536 case ETHERTYPE_IP:
1537 ip = lagg_gethdr(m, off, sizeof(*ip), &buf);
1538 if (ip == NULL)
1539 goto out;
1540
1541 if (sc->sc_flags & LAGG_F_HASHL3) {
1542 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1543 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1544 }
1545 if (!(sc->sc_flags & LAGG_F_HASHL4))
1546 break;
1547 switch (ip->ip_p) {
1548 case IPPROTO_TCP:
1549 case IPPROTO_UDP:
1550 case IPPROTO_SCTP:
1551 iphlen = ip->ip_hl << 2;
1552 if (iphlen < sizeof(*ip))
1553 break;
1554 off += iphlen;
1555 ports = lagg_gethdr(m, off, sizeof(*ports), &buf);
1556 if (ports == NULL)
1557 break;
1558 p = hash32_buf(ports, sizeof(*ports), p);
1559 break;
1560 }
1561 break;
1562 #endif
1563 #ifdef INET6
1564 case ETHERTYPE_IPV6:
1565 if (!(sc->sc_flags & LAGG_F_HASHL3))
1566 break;
1567 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &buf);
1568 if (ip6 == NULL)
1569 goto out;
1570
1571 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1572 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1573 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1574 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
1575 break;
1576 #endif
1577 }
1578 out:
1579 return (p);
1580 }
1581
1582 int
1583 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1584 {
1585
1586 return (ifp->if_transmit)(ifp, m);
1587 }
1588
1589 /*
1590 * Simple round robin aggregation
1591 */
1592
1593 static int
1594 lagg_rr_attach(struct lagg_softc *sc)
1595 {
1596 sc->sc_detach = lagg_rr_detach;
1597 sc->sc_start = lagg_rr_start;
1598 sc->sc_input = lagg_rr_input;
1599 sc->sc_port_create = NULL;
1600 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1601 sc->sc_seq = 0;
1602
1603 return (0);
1604 }
1605
1606 static int
1607 lagg_rr_detach(struct lagg_softc *sc)
1608 {
1609 return (0);
1610 }
1611
1612 static int
1613 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1614 {
1615 struct lagg_port *lp;
1616 uint32_t p;
1617
1618 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1619 p %= sc->sc_count;
1620 lp = SLIST_FIRST(&sc->sc_ports);
1621 while (p--)
1622 lp = SLIST_NEXT(lp, lp_entries);
1623
1624 /*
1625 * Check the port's link state. This will return the next active
1626 * port if the link is down or the port is NULL.
1627 */
1628 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1629 m_freem(m);
1630 return (ENETDOWN);
1631 }
1632
1633 /* Send mbuf */
1634 return (lagg_enqueue(lp->lp_ifp, m));
1635 }
1636
1637 static struct mbuf *
1638 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1639 {
1640 struct ifnet *ifp = sc->sc_ifp;
1641
1642 /* Just pass in the packet to our lagg device */
1643 m->m_pkthdr.rcvif = ifp;
1644
1645 return (m);
1646 }
1647
1648 /*
1649 * Active failover
1650 */
1651
1652 static int
1653 lagg_fail_attach(struct lagg_softc *sc)
1654 {
1655 sc->sc_detach = lagg_fail_detach;
1656 sc->sc_start = lagg_fail_start;
1657 sc->sc_input = lagg_fail_input;
1658 sc->sc_port_create = NULL;
1659 sc->sc_port_destroy = NULL;
1660
1661 return (0);
1662 }
1663
1664 static int
1665 lagg_fail_detach(struct lagg_softc *sc)
1666 {
1667 return (0);
1668 }
1669
1670 static int
1671 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1672 {
1673 struct lagg_port *lp;
1674
1675 /* Use the master port if active or the next available port */
1676 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1677 m_freem(m);
1678 return (ENETDOWN);
1679 }
1680
1681 /* Send mbuf */
1682 return (lagg_enqueue(lp->lp_ifp, m));
1683 }
1684
1685 static struct mbuf *
1686 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1687 {
1688 struct ifnet *ifp = sc->sc_ifp;
1689 struct lagg_port *tmp_tp;
1690
1691 if (lp == sc->sc_primary || lagg_failover_rx_all) {
1692 m->m_pkthdr.rcvif = ifp;
1693 return (m);
1694 }
1695
1696 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1697 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1698 /*
1699 * If tmp_tp is null, we've received a packet when all
1700 * our links are down. Weird, but process it anyway.
1701 */
1702 if ((tmp_tp == NULL || tmp_tp == lp)) {
1703 m->m_pkthdr.rcvif = ifp;
1704 return (m);
1705 }
1706 }
1707
1708 m_freem(m);
1709 return (NULL);
1710 }
1711
1712 /*
1713 * Loadbalancing
1714 */
1715
1716 static int
1717 lagg_lb_attach(struct lagg_softc *sc)
1718 {
1719 struct lagg_port *lp;
1720 struct lagg_lb *lb;
1721
1722 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1723 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1724 return (ENOMEM);
1725
1726 sc->sc_detach = lagg_lb_detach;
1727 sc->sc_start = lagg_lb_start;
1728 sc->sc_input = lagg_lb_input;
1729 sc->sc_port_create = lagg_lb_port_create;
1730 sc->sc_port_destroy = lagg_lb_port_destroy;
1731 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1732
1733 lb->lb_key = arc4random();
1734 sc->sc_psc = (caddr_t)lb;
1735
1736 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1737 lagg_lb_port_create(lp);
1738
1739 return (0);
1740 }
1741
1742 static int
1743 lagg_lb_detach(struct lagg_softc *sc)
1744 {
1745 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1746 if (lb != NULL)
1747 free(lb, M_DEVBUF);
1748 return (0);
1749 }
1750
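/*
 * Rebuild the flat table of ports used for hash-based selection,
 * skipping "lp" so that a port being destroyed is left out of the
 * new table.
 */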
1751 static int
1752 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1753 {
1754 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1755 struct lagg_port *lp_next;
1756 int i = 0;
1757
1758 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1759 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1760 if (lp_next == lp)
1761 continue;
1762 if (i >= LAGG_MAX_PORTS)
1763 return (EINVAL);
1764 if (sc->sc_ifflags & IFF_DEBUG)
1765 printf("%s: port %s at index %d\n",
1766 sc->sc_ifname, lp_next->lp_ifname, i);
1767 lb->lb_ports[i++] = lp_next;
1768 }
1769
1770 return (0);
1771 }
1772
1773 static int
1774 lagg_lb_port_create(struct lagg_port *lp)
1775 {
1776 struct lagg_softc *sc = lp->lp_softc;
1777 return (lagg_lb_porttable(sc, NULL));
1778 }
1779
1780 static void
1781 lagg_lb_port_destroy(struct lagg_port *lp)
1782 {
1783 struct lagg_softc *sc = lp->lp_softc;
1784 lagg_lb_porttable(sc, lp);
1785 }
1786
1787 static int
1788 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1789 {
1790 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1791 struct lagg_port *lp = NULL;
1792 uint32_t p = 0;
1793
1794 if (sc->use_flowid && (m->m_flags & M_FLOWID))
1795 p = m->m_pkthdr.flowid;
1796 else
1797 p = lagg_hashmbuf(sc, m, lb->lb_key);
1798 p %= sc->sc_count;
1799 lp = lb->lb_ports[p];
1800
1801 /*
1802 * Check the port's link state. This will return the next active
1803 * port if the link is down or the port is NULL.
1804 */
1805 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1806 m_freem(m);
1807 return (ENETDOWN);
1808 }
1809
1810 /* Send mbuf */
1811 return (lagg_enqueue(lp->lp_ifp, m));
1812 }
1813
1814 static struct mbuf *
1815 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1816 {
1817 struct ifnet *ifp = sc->sc_ifp;
1818
1819 /* Just pass in the packet to our lagg device */
1820 m->m_pkthdr.rcvif = ifp;
1821
1822 return (m);
1823 }
1824
1825 /*
1826 * 802.3ad LACP
1827 */
1828
1829 static int
1830 lagg_lacp_attach(struct lagg_softc *sc)
1831 {
1832 struct lagg_port *lp;
1833 int error;
1834
1835 sc->sc_detach = lagg_lacp_detach;
1836 sc->sc_port_create = lacp_port_create;
1837 sc->sc_port_destroy = lacp_port_destroy;
1838 sc->sc_linkstate = lacp_linkstate;
1839 sc->sc_start = lagg_lacp_start;
1840 sc->sc_input = lagg_lacp_input;
1841 sc->sc_init = lacp_init;
1842 sc->sc_stop = lacp_stop;
1843 sc->sc_lladdr = lagg_lacp_lladdr;
1844 sc->sc_req = lacp_req;
1845 sc->sc_portreq = lacp_portreq;
1846
1847 error = lacp_attach(sc);
1848 if (error)
1849 return (error);
1850
1851 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1852 lacp_port_create(lp);
1853
1854 return (error);
1855 }
1856
1857 static int
1858 lagg_lacp_detach(struct lagg_softc *sc)
1859 {
1860 struct lagg_port *lp;
1861 int error;
1862
1863 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1864 lacp_port_destroy(lp);
1865
1866 /* unlocking is safe here */
1867 LAGG_WUNLOCK(sc);
1868 error = lacp_detach(sc);
1869 LAGG_WLOCK(sc);
1870
1871 return (error);
1872 }
1873
1874 static void
1875 lagg_lacp_lladdr(struct lagg_softc *sc)
1876 {
1877 struct lagg_port *lp;
1878
1879 /* purge all the lacp ports */
1880 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1881 lacp_port_destroy(lp);
1882
1883 /* add them back in */
1884 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1885 lacp_port_create(lp);
1886 }
1887
1888 static int
1889 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1890 {
1891 struct lagg_port *lp;
1892
1893 lp = lacp_select_tx_port(sc, m);
1894 if (lp == NULL) {
1895 m_freem(m);
1896 return (EBUSY);
1897 }
1898
1899 /* Send mbuf */
1900 return (lagg_enqueue(lp->lp_ifp, m));
1901 }
1902
1903 static struct mbuf *
1904 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1905 {
1906 struct ifnet *ifp = sc->sc_ifp;
1907 struct ether_header *eh;
1908 u_short etype;
1909
1910 eh = mtod(m, struct ether_header *);
1911 etype = ntohs(eh->ether_type);
1912
1913 /* Tap off LACP control messages */
1914 if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
1915 m = lacp_input(lp, m);
1916 if (m == NULL)
1917 return (NULL);
1918 }
1919
1920 /*
1921 * If the port is not collecting or not in the active aggregator then
1922 * free and return.
1923 */
1924 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1925 m_freem(m);
1926 return (NULL);
1927 }
1928
1929 m->m_pkthdr.rcvif = ifp;
1930 return (m);
1931 }