FreeBSD/Linux Kernel Cross Reference
sys/net/if_lagg.c
1 /* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD$");
22
23 #include "opt_inet.h"
24 #include "opt_inet6.h"
25
26 #include <sys/param.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/queue.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/module.h>
35 #include <sys/systm.h>
36 #include <sys/proc.h>
37 #include <sys/lock.h>
38 #include <sys/rwlock.h>
39 #include <sys/taskqueue.h>
40
41 #include <net/ethernet.h>
42 #include <net/if.h>
43 #include <net/if_clone.h>
44 #include <net/if_arp.h>
45 #include <net/if_dl.h>
46 #include <net/if_llc.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_var.h>
50 #include <net/bpf.h>
51
52 #ifdef INET
53 #include <netinet/in.h>
54 #include <netinet/in_systm.h>
55 #include <netinet/if_ether.h>
56 #include <netinet/ip.h>
57 #endif
58
59 #ifdef INET6
60 #include <netinet/ip6.h>
61 #endif
62
63 #include <net/if_vlan_var.h>
64 #include <net/if_lagg.h>
65 #include <net/ieee8023ad_lacp.h>
66
67 /* Special flags we should propagate to the lagg ports. */
68 static struct {
69 int flag;
70 int (*func)(struct ifnet *, int);
71 } lagg_pflags[] = {
72 {IFF_PROMISC, ifpromisc},
73 {IFF_ALLMULTI, if_allmulti},
74 {0, NULL}
75 };
76
77 SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
78 static struct mtx lagg_list_mtx;
79 eventhandler_tag lagg_detach_cookie = NULL;
80
81 static int lagg_clone_create(struct if_clone *, int);
82 static void lagg_clone_destroy(struct ifnet *);
83 static void lagg_lladdr(struct lagg_softc *, uint8_t *);
84 static void lagg_capabilities(struct lagg_softc *);
85 static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
86 static void lagg_port_setlladdr(void *, int);
87 static int lagg_port_create(struct lagg_softc *, struct ifnet *);
88 static int lagg_port_destroy(struct lagg_port *, int);
89 static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
90 static void lagg_linkstate(struct lagg_softc *);
91 static void lagg_port_state(struct ifnet *, int);
92 static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
93 static int lagg_port_output(struct ifnet *, struct mbuf *,
94 struct sockaddr *, struct rtentry *);
95 static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
96 static int lagg_port_checkstacking(struct lagg_softc *);
97 static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
98 static void lagg_init(void *);
99 static void lagg_stop(struct lagg_softc *);
100 static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
101 static int lagg_ether_setmulti(struct lagg_softc *);
102 static int lagg_ether_cmdmulti(struct lagg_port *, int);
103 static void lagg_ether_purgemulti(struct lagg_softc *);
104 static int lagg_setflag(struct lagg_port *, int, int,
105 int (*func)(struct ifnet *, int));
106 static int lagg_setflags(struct lagg_port *, int status);
107 static void lagg_start(struct ifnet *);
108 static int lagg_media_change(struct ifnet *);
109 static void lagg_media_status(struct ifnet *, struct ifmediareq *);
110 static struct lagg_port *lagg_link_active(struct lagg_softc *,
111 struct lagg_port *);
112 static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
113
114 IFC_SIMPLE_DECLARE(lagg, 0);
115
116 /* Simple round robin */
117 static int lagg_rr_attach(struct lagg_softc *);
118 static int lagg_rr_detach(struct lagg_softc *);
119 static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
120 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
121 struct mbuf *);
122
123 /* Active failover */
124 static int lagg_fail_attach(struct lagg_softc *);
125 static int lagg_fail_detach(struct lagg_softc *);
126 static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
127 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
128 struct mbuf *);
129
130 /* Loadbalancing */
131 static int lagg_lb_attach(struct lagg_softc *);
132 static int lagg_lb_detach(struct lagg_softc *);
133 static int lagg_lb_port_create(struct lagg_port *);
134 static void lagg_lb_port_destroy(struct lagg_port *);
135 static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
136 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
137 struct mbuf *);
138 static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
139
140 /* 802.3ad LACP */
141 static int lagg_lacp_attach(struct lagg_softc *);
142 static int lagg_lacp_detach(struct lagg_softc *);
143 static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
144 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
145 struct mbuf *);
146 static void lagg_lacp_lladdr(struct lagg_softc *);
147
148 /* lagg protocol table */
149 static const struct {
150 int ti_proto;
151 int (*ti_attach)(struct lagg_softc *);
152 } lagg_protos[] = {
153 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
154 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
155 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
156 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
157 { LAGG_PROTO_LACP, lagg_lacp_attach },
158 { LAGG_PROTO_NONE, NULL }
159 };
160
161 /*
162 * Return a 32-bit hash of the given buffer.
163 * XXX Taken from sys/sys/hash.h. That file cannot be included directly due to
164 * compiler warnings that need an API change to fix.
165 */
166 #define HASHSTEP(x,c) (((x << 5) + x) + (c))
167 static __inline uint32_t
168 hash32_buf(const void *buf, size_t len, uint32_t hash)
169 {
170 const unsigned char *p = buf;
171
172 while (len--)
173 hash = HASHSTEP(hash, *p++);
174
175 return hash;
176 }
177 #undef HASHSTEP
178
179 static int
180 lagg_modevent(module_t mod, int type, void *data)
181 {
182
183 switch (type) {
184 case MOD_LOAD:
185 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
186 SLIST_INIT(&lagg_list);
187 if_clone_attach(&lagg_cloner);
188 lagg_input_p = lagg_input;
189 lagg_linkstate_p = lagg_port_state;
190 lagg_detach_cookie = EVENTHANDLER_REGISTER(
191 ifnet_departure_event, lagg_port_ifdetach, NULL,
192 EVENTHANDLER_PRI_ANY);
193 break;
194 case MOD_UNLOAD:
195 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
196 lagg_detach_cookie);
197 if_clone_detach(&lagg_cloner);
198 while (!SLIST_EMPTY(&lagg_list))
199 lagg_clone_destroy(SLIST_FIRST(&lagg_list)->sc_ifp);
200 lagg_input_p = NULL;
201 lagg_linkstate_p = NULL;
202 mtx_destroy(&lagg_list_mtx);
203 break;
204 default:
205 return (EOPNOTSUPP);
206 }
207 return (0);
208 }
209
210 static moduledata_t lagg_mod = {
211 "if_lagg",
212 lagg_modevent,
213 0
214 };
215
216 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
217
218 static int
219 lagg_clone_create(struct if_clone *ifc, int unit)
220 {
221 struct lagg_softc *sc;
222 struct ifnet *ifp;
223 int i, error = 0;
224 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
225
226 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
227 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
228 if (ifp == NULL) {
229 free(sc, M_DEVBUF);
230 return (ENOSPC);
231 }
232
233 sc->sc_proto = LAGG_PROTO_NONE;
234 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
235 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
236 sc->sc_proto = lagg_protos[i].ti_proto;
237 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
238 if_free_type(ifp, IFT_ETHER);
239 free(sc, M_DEVBUF);
240 return (error);
241 }
242 break;
243 }
244 }
245 LAGG_LOCK_INIT(sc);
246 SLIST_INIT(&sc->sc_ports);
247 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
248
249 /* Initialise pseudo media types */
250 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
251 lagg_media_status);
252 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
253 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
254
255 if_initname(ifp, ifc->ifc_name, unit);
256 ifp->if_type = IFT_ETHER;
257 ifp->if_softc = sc;
258 ifp->if_start = lagg_start;
259 ifp->if_init = lagg_init;
260 ifp->if_ioctl = lagg_ioctl;
261 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
262
263 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
264 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
265 IFQ_SET_READY(&ifp->if_snd);
266
267 /*
268 * Attach as an ordinary Ethernet device; child ports will be attached
269 * as the special device type IFT_IEEE8023ADLAG.
270 */
271 ether_ifattach(ifp, eaddr);
272
273 /* Insert into the global list of laggs */
274 mtx_lock(&lagg_list_mtx);
275 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
276 mtx_unlock(&lagg_list_mtx);
277
278 return (0);
279 }
280
281 static void
282 lagg_clone_destroy(struct ifnet *ifp)
283 {
284 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
285 struct lagg_port *lp;
286
287 LAGG_WLOCK(sc);
288
289 lagg_stop(sc);
290 ifp->if_flags &= ~IFF_UP;
291
292 /* Shutdown and remove lagg ports */
293 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
294 lagg_port_destroy(lp, 1);
295 /* Unhook the aggregation protocol */
296 (*sc->sc_detach)(sc);
297
298 /* Remove any multicast groups that we may have joined. */
299 lagg_ether_purgemulti(sc);
300
301 LAGG_WUNLOCK(sc);
302
303 ifmedia_removeall(&sc->sc_media);
304 ether_ifdetach(ifp);
305 if_free_type(ifp, IFT_ETHER);
306
307 mtx_lock(&lagg_list_mtx);
308 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
309 mtx_unlock(&lagg_list_mtx);
310
311 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
312 LAGG_LOCK_DESTROY(sc);
313 free(sc, M_DEVBUF);
314 }
315
316 static void
317 lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
318 {
319 struct ifnet *ifp = sc->sc_ifp;
320
321 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
322 return;
323
324 bcopy(lladdr, IFP2ENADDR(ifp), ETHER_ADDR_LEN);
325 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
326 /* Let the protocol know the MAC has changed */
327 if (sc->sc_lladdr != NULL)
328 (*sc->sc_lladdr)(sc);
329 }
330
331 static void
332 lagg_capabilities(struct lagg_softc *sc)
333 {
334 struct lagg_port *lp;
335 int cap = ~0, ena = ~0;
336
337 LAGG_WLOCK_ASSERT(sc);
338
339 /* Get capabilities from the lagg ports */
340 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
341 cap &= lp->lp_ifp->if_capabilities;
342 ena &= lp->lp_ifp->if_capenable;
343 }
344 cap = (cap == ~0 ? 0 : cap);
345 ena = (ena == ~0 ? 0 : ena);
346
347 if (sc->sc_ifp->if_capabilities != cap ||
348 sc->sc_ifp->if_capenable != ena) {
349 sc->sc_ifp->if_capabilities = cap;
350 sc->sc_ifp->if_capenable = ena;
351 getmicrotime(&sc->sc_ifp->if_lastchange);
352
353 if (sc->sc_ifflags & IFF_DEBUG)
354 if_printf(sc->sc_ifp,
355 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
356 }
357 }
358
359 static void
360 lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
361 {
362 struct lagg_softc *sc = lp->lp_softc;
363 struct ifnet *ifp = lp->lp_ifp;
364 struct lagg_llq *llq;
365 int pending = 0;
366
367 LAGG_WLOCK_ASSERT(sc);
368
369 if (lp->lp_detaching ||
370 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
371 return;
372
373 /* Check to make sure it's not already queued to be changed */
374 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
375 if (llq->llq_ifp == ifp) {
376 pending = 1;
377 break;
378 }
379 }
380
381 if (!pending) {
382 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
383 if (llq == NULL) /* XXX what to do */
384 return;
385 }
386
387 /* Update the lladdr even if pending, it may have changed */
388 llq->llq_ifp = ifp;
389 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
390
391 if (!pending)
392 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
393
394 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
395 }
396
397 /*
398 * Set the interface MAC address from a taskqueue to avoid a LOR.
399 */
400 static void
401 lagg_port_setlladdr(void *arg, int pending)
402 {
403 struct lagg_softc *sc = (struct lagg_softc *)arg;
404 struct lagg_llq *llq, *head;
405 struct ifnet *ifp;
406 int error;
407
408 /* Grab a local reference of the queue and remove it from the softc */
409 LAGG_WLOCK(sc);
410 head = SLIST_FIRST(&sc->sc_llq_head);
411 SLIST_FIRST(&sc->sc_llq_head) = NULL;
412 LAGG_WUNLOCK(sc);
413
414 /*
415 * Traverse the queue and set the lladdr on each ifp. It is safe to do
416 * unlocked as we have the only reference to it.
417 */
418 for (llq = head; llq != NULL; llq = head) {
419 ifp = llq->llq_ifp;
420
421 /* Set the link layer address */
422 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
423 if (error)
424 printf("%s: setlladdr failed on %s\n", __func__,
425 ifp->if_xname);
426
427 head = SLIST_NEXT(llq, llq_entries);
428 free(llq, M_DEVBUF);
429 }
430 }
431
432 static int
433 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
434 {
435 struct lagg_softc *sc_ptr;
436 struct lagg_port *lp;
437 int error = 0;
438
439 LAGG_WLOCK_ASSERT(sc);
440
441 /* Limit the maximal number of lagg ports */
442 if (sc->sc_count >= LAGG_MAX_PORTS)
443 return (ENOSPC);
444
445 /* New lagg port has to be in an idle state */
446 if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
447 return (EBUSY);
448
449 /* Check if port has already been associated to a lagg */
450 if (ifp->if_lagg != NULL)
451 return (EBUSY);
452
453 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
454 if (ifp->if_type != IFT_ETHER)
455 return (EPROTONOSUPPORT);
456
457 /* Allow the first Ethernet member to define the MTU */
458 if (SLIST_EMPTY(&sc->sc_ports))
459 sc->sc_ifp->if_mtu = ifp->if_mtu;
460 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
461 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
462 ifp->if_xname);
463 return (EINVAL);
464 }
465
466 if ((lp = malloc(sizeof(struct lagg_port),
467 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
468 return (ENOMEM);
469
470 /* Check if port is a stacked lagg */
471 mtx_lock(&lagg_list_mtx);
472 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
473 if (ifp == sc_ptr->sc_ifp) {
474 mtx_unlock(&lagg_list_mtx);
475 free(lp, M_DEVBUF);
476 return (EINVAL);
477 /* XXX disable stacking for the moment, it's untested
478 lp->lp_flags |= LAGG_PORT_STACK;
479 if (lagg_port_checkstacking(sc_ptr) >=
480 LAGG_MAX_STACKING) {
481 mtx_unlock(&lagg_list_mtx);
482 free(lp, M_DEVBUF);
483 return (E2BIG);
484 }
485 */
486 }
487 }
488 mtx_unlock(&lagg_list_mtx);
489
490 /* Change the interface type */
491 lp->lp_iftype = ifp->if_type;
492 ifp->if_type = IFT_IEEE8023ADLAG;
493 ifp->if_lagg = lp;
494 lp->lp_ioctl = ifp->if_ioctl;
495 ifp->if_ioctl = lagg_port_ioctl;
496 lp->lp_output = ifp->if_output;
497 ifp->if_output = lagg_port_output;
498
499 lp->lp_ifp = ifp;
500 lp->lp_softc = sc;
501
502 /* Save port link layer address */
503 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
504
505 if (SLIST_EMPTY(&sc->sc_ports)) {
506 sc->sc_primary = lp;
507 lagg_lladdr(sc, IF_LLADDR(ifp));
508 } else {
509 /* Update link layer address for this port */
510 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
511 }
512
513 /* Insert into the list of ports */
514 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
515 sc->sc_count++;
516
517 /* Update lagg capabilities */
518 lagg_capabilities(sc);
519 lagg_linkstate(sc);
520
521 /* Add multicast addresses and interface flags to this port */
522 lagg_ether_cmdmulti(lp, 1);
523 lagg_setflags(lp, 1);
524
525 if (sc->sc_port_create != NULL)
526 error = (*sc->sc_port_create)(lp);
527 if (error) {
528 /* remove the port again, without calling sc_port_destroy */
529 lagg_port_destroy(lp, 0);
530 return (error);
531 }
532
533 return (error);
534 }
535
536 static int
537 lagg_port_checkstacking(struct lagg_softc *sc)
538 {
539 struct lagg_softc *sc_ptr;
540 struct lagg_port *lp;
541 int m = 0;
542
543 LAGG_WLOCK_ASSERT(sc);
544
545 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
546 if (lp->lp_flags & LAGG_PORT_STACK) {
547 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
548 m = MAX(m, lagg_port_checkstacking(sc_ptr));
549 }
550 }
551
552 return (m + 1);
553 }
554
555 static int
556 lagg_port_destroy(struct lagg_port *lp, int runpd)
557 {
558 struct lagg_softc *sc = lp->lp_softc;
559 struct lagg_port *lp_ptr;
560 struct lagg_llq *llq;
561 struct ifnet *ifp = lp->lp_ifp;
562
563 LAGG_WLOCK_ASSERT(sc);
564
565 if (runpd && sc->sc_port_destroy != NULL)
566 (*sc->sc_port_destroy)(lp);
567
568 /*
569 * Remove multicast addresses and interface flags from this port and
570 * reset the MAC address, skip if the interface is being detached.
571 */
572 if (!lp->lp_detaching) {
573 lagg_ether_cmdmulti(lp, 0);
574 lagg_setflags(lp, 0);
575 lagg_port_lladdr(lp, lp->lp_lladdr);
576 }
577
578 /* Restore interface */
579 ifp->if_type = lp->lp_iftype;
580 ifp->if_ioctl = lp->lp_ioctl;
581 ifp->if_output = lp->lp_output;
582 ifp->if_lagg = NULL;
583
584 /* Finally, remove the port from the lagg */
585 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
586 sc->sc_count--;
587
588 /* Update the primary interface */
589 if (lp == sc->sc_primary) {
590 uint8_t lladdr[ETHER_ADDR_LEN];
591
592 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
593 bzero(&lladdr, ETHER_ADDR_LEN);
594 } else {
595 bcopy(lp_ptr->lp_lladdr,
596 lladdr, ETHER_ADDR_LEN);
597 }
598 lagg_lladdr(sc, lladdr);
599 sc->sc_primary = lp_ptr;
600
601 /* Update link layer address for each port */
602 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
603 lagg_port_lladdr(lp_ptr, lladdr);
604 }
605
606 /* Remove any pending lladdr changes from the queue */
607 if (lp->lp_detaching) {
608 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
609 if (llq->llq_ifp == ifp) {
610 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
611 llq_entries);
612 free(llq, M_DEVBUF);
613 break; /* Only appears once */
614 }
615 }
616 }
617
618 if (lp->lp_ifflags)
619 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
620
621 free(lp, M_DEVBUF);
622
623 /* Update lagg capabilities */
624 lagg_capabilities(sc);
625 lagg_linkstate(sc);
626
627 return (0);
628 }
629
630 static int
631 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
632 {
633 struct lagg_reqport *rp = (struct lagg_reqport *)data;
634 struct lagg_softc *sc;
635 struct lagg_port *lp = NULL;
636 int error = 0;
637
638 /* Should be checked by the caller */
639 if (ifp->if_type != IFT_IEEE8023ADLAG ||
640 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
641 goto fallback;
642
643 switch (cmd) {
644 case SIOCGLAGGPORT:
645 if (rp->rp_portname[0] == '\0' ||
646 ifunit(rp->rp_portname) != ifp) {
647 error = EINVAL;
648 break;
649 }
650
651 LAGG_RLOCK(sc);
652 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
653 error = ENOENT;
654 LAGG_RUNLOCK(sc);
655 break;
656 }
657
658 lagg_port2req(lp, rp);
659 LAGG_RUNLOCK(sc);
660 break;
661
662 case SIOCSIFCAP:
663 if (lp->lp_ioctl == NULL) {
664 error = EINVAL;
665 break;
666 }
667 error = (*lp->lp_ioctl)(ifp, cmd, data);
668 if (error)
669 break;
670
671 /* Update lagg interface capabilities */
672 LAGG_WLOCK(sc);
673 lagg_capabilities(sc);
674 LAGG_WUNLOCK(sc);
675 break;
676
677 case SIOCSIFMTU:
678 /* Do not allow the MTU to be changed once joined */
679 error = EINVAL;
680 break;
681
682 default:
683 goto fallback;
684 }
685
686 return (error);
687
688 fallback:
689 if (lp != NULL && lp->lp_ioctl != NULL)
690 return ((*lp->lp_ioctl)(ifp, cmd, data));
691
692 return (EINVAL);
693 }
694
695 static int
696 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
697 struct sockaddr *dst, struct rtentry *rt0)
698 {
699 struct lagg_port *lp = ifp->if_lagg;
700 struct ether_header *eh;
701 short type = 0;
702
703 switch (dst->sa_family) {
704 case pseudo_AF_HDRCMPLT:
705 case AF_UNSPEC:
706 eh = (struct ether_header *)dst->sa_data;
707 type = eh->ether_type;
708 break;
709 }
710
711 /*
712 * Only allow ethernet types required to initiate or maintain the link,
713 * aggregated frames take a different path.
714 */
715 switch (ntohs(type)) {
716 case ETHERTYPE_PAE: /* EAPOL PAE/802.1x */
717 return ((*lp->lp_output)(ifp, m, dst, rt0));
718 }
719
720 /* drop any other frames */
721 m_freem(m);
722 return (EBUSY);
723 }
724
725 static void
726 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
727 {
728 struct lagg_port *lp;
729 struct lagg_softc *sc;
730
731 if ((lp = ifp->if_lagg) == NULL)
732 return;
733
734 sc = lp->lp_softc;
735
736 LAGG_WLOCK(sc);
737 lp->lp_detaching = 1;
738 lagg_port_destroy(lp, 1);
739 LAGG_WUNLOCK(sc);
740 }
741
742 static void
743 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
744 {
745 struct lagg_softc *sc = lp->lp_softc;
746
747 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
748 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
749 rp->rp_prio = lp->lp_prio;
750 rp->rp_flags = lp->lp_flags;
751 if (sc->sc_portreq != NULL)
752 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
753
754 /* Add protocol specific flags */
755 switch (sc->sc_proto) {
756 case LAGG_PROTO_FAILOVER:
757 if (lp == sc->sc_primary)
758 rp->rp_flags |= LAGG_PORT_MASTER;
759 if (lp == lagg_link_active(sc, sc->sc_primary))
760 rp->rp_flags |= LAGG_PORT_ACTIVE;
761 break;
762
763 case LAGG_PROTO_ROUNDROBIN:
764 case LAGG_PROTO_LOADBALANCE:
765 case LAGG_PROTO_ETHERCHANNEL:
766 if (LAGG_PORTACTIVE(lp))
767 rp->rp_flags |= LAGG_PORT_ACTIVE;
768 break;
769
770 case LAGG_PROTO_LACP:
771 /* LACP has a different definition of active */
772 if (lacp_isactive(lp))
773 rp->rp_flags |= LAGG_PORT_ACTIVE;
774 if (lacp_iscollecting(lp))
775 rp->rp_flags |= LAGG_PORT_COLLECTING;
776 if (lacp_isdistributing(lp))
777 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
778 break;
779 }
780
781 }
782
783 static void
784 lagg_init(void *xsc)
785 {
786 struct lagg_softc *sc = (struct lagg_softc *)xsc;
787 struct lagg_port *lp;
788 struct ifnet *ifp = sc->sc_ifp;
789
790 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
791 return;
792
793 LAGG_WLOCK(sc);
794
795 ifp->if_drv_flags |= IFF_DRV_RUNNING;
796 /* Update the port lladdrs */
797 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
798 lagg_port_lladdr(lp, IF_LLADDR(ifp));
799
800 if (sc->sc_init != NULL)
801 (*sc->sc_init)(sc);
802
803 LAGG_WUNLOCK(sc);
804 }
805
806 static void
807 lagg_stop(struct lagg_softc *sc)
808 {
809 struct ifnet *ifp = sc->sc_ifp;
810
811 LAGG_WLOCK_ASSERT(sc);
812
813 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
814 return;
815
816 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
817
818 if (sc->sc_stop != NULL)
819 (*sc->sc_stop)(sc);
820 }
821
822 static int
823 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
824 {
825 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
826 struct lagg_reqall *ra = (struct lagg_reqall *)data;
827 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
828 struct ifreq *ifr = (struct ifreq *)data;
829 struct lagg_port *lp;
830 struct ifnet *tpif;
831 struct thread *td = curthread;
832 char *buf, *outbuf;
833 int count, buflen, len, error = 0;
834
835 bzero(&rpbuf, sizeof(rpbuf));
836
837 switch (cmd) {
838 case SIOCGLAGG:
839 LAGG_RLOCK(sc);
840 count = 0;
841 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
842 count++;
843 buflen = count * sizeof(struct lagg_reqport);
844 LAGG_RUNLOCK(sc);
845
846 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
847
848 LAGG_RLOCK(sc);
849 ra->ra_proto = sc->sc_proto;
850 if (sc->sc_req != NULL)
851 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
852
853 count = 0;
854 buf = outbuf;
855 len = min(ra->ra_size, buflen);
856 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
857 if (len < sizeof(rpbuf))
858 break;
859
860 lagg_port2req(lp, &rpbuf);
861 memcpy(buf, &rpbuf, sizeof(rpbuf));
862 count++;
863 buf += sizeof(rpbuf);
864 len -= sizeof(rpbuf);
865 }
866 LAGG_RUNLOCK(sc);
867 ra->ra_ports = count;
868 ra->ra_size = count * sizeof(rpbuf);
869 error = copyout(outbuf, ra->ra_port, ra->ra_size);
870 free(outbuf, M_TEMP);
871 break;
872 case SIOCSLAGG:
873 error = suser(td);
874 if (error)
875 break;
876 if (ra->ra_proto >= LAGG_PROTO_MAX) {
877 error = EPROTONOSUPPORT;
878 break;
879 }
880 if (sc->sc_proto != LAGG_PROTO_NONE) {
881 LAGG_WLOCK(sc);
882 error = sc->sc_detach(sc);
883 /* Reset protocol and pointers */
884 sc->sc_proto = LAGG_PROTO_NONE;
885 sc->sc_detach = NULL;
886 sc->sc_start = NULL;
887 sc->sc_input = NULL;
888 sc->sc_port_create = NULL;
889 sc->sc_port_destroy = NULL;
890 sc->sc_linkstate = NULL;
891 sc->sc_init = NULL;
892 sc->sc_stop = NULL;
893 sc->sc_lladdr = NULL;
894 sc->sc_req = NULL;
895 sc->sc_portreq = NULL;
896 LAGG_WUNLOCK(sc);
897 }
898 if (error != 0)
899 break;
900 for (int i = 0; i < (sizeof(lagg_protos) /
901 sizeof(lagg_protos[0])); i++) {
902 if (lagg_protos[i].ti_proto == ra->ra_proto) {
903 if (sc->sc_ifflags & IFF_DEBUG)
904 printf("%s: using proto %u\n",
905 sc->sc_ifname,
906 lagg_protos[i].ti_proto);
907 LAGG_WLOCK(sc);
908 sc->sc_proto = lagg_protos[i].ti_proto;
909 if (sc->sc_proto != LAGG_PROTO_NONE)
910 error = lagg_protos[i].ti_attach(sc);
911 LAGG_WUNLOCK(sc);
912 return (error);
913 }
914 }
915 error = EPROTONOSUPPORT;
916 break;
917 case SIOCGLAGGPORT:
918 if (rp->rp_portname[0] == '\0' ||
919 (tpif = ifunit(rp->rp_portname)) == NULL) {
920 error = EINVAL;
921 break;
922 }
923
924 LAGG_RLOCK(sc);
925 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
926 lp->lp_softc != sc) {
927 error = ENOENT;
928 LAGG_RUNLOCK(sc);
929 break;
930 }
931
932 lagg_port2req(lp, rp);
933 LAGG_RUNLOCK(sc);
934 break;
935 case SIOCSLAGGPORT:
936 error = suser(td);
937 if (error)
938 break;
939 if (rp->rp_portname[0] == '\0' ||
940 (tpif = ifunit(rp->rp_portname)) == NULL) {
941 error = EINVAL;
942 break;
943 }
944 LAGG_WLOCK(sc);
945 error = lagg_port_create(sc, tpif);
946 LAGG_WUNLOCK(sc);
947 break;
948 case SIOCSLAGGDELPORT:
949 error = suser(td);
950 if (error)
951 break;
952 if (rp->rp_portname[0] == '\0' ||
953 (tpif = ifunit(rp->rp_portname)) == NULL) {
954 error = EINVAL;
955 break;
956 }
957
958 LAGG_WLOCK(sc);
959 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
960 lp->lp_softc != sc) {
961 error = ENOENT;
962 LAGG_WUNLOCK(sc);
963 break;
964 }
965
966 error = lagg_port_destroy(lp, 1);
967 LAGG_WUNLOCK(sc);
968 break;
969 case SIOCSIFFLAGS:
970 /* Set flags on ports too */
971 LAGG_WLOCK(sc);
972 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
973 lagg_setflags(lp, 1);
974 }
975 LAGG_WUNLOCK(sc);
976
977 if (!(ifp->if_flags & IFF_UP) &&
978 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
979 /*
980 * If interface is marked down and it is running,
981 * then stop and disable it.
982 */
983 LAGG_WLOCK(sc);
984 lagg_stop(sc);
985 LAGG_WUNLOCK(sc);
986 } else if ((ifp->if_flags & IFF_UP) &&
987 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
988 /*
989 * If interface is marked up and it is stopped, then
990 * start it.
991 */
992 (*ifp->if_init)(sc);
993 }
994 break;
995 case SIOCADDMULTI:
996 case SIOCDELMULTI:
997 LAGG_WLOCK(sc);
998 error = lagg_ether_setmulti(sc);
999 LAGG_WUNLOCK(sc);
1000 break;
1001 case SIOCSIFMEDIA:
1002 case SIOCGIFMEDIA:
1003 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1004 break;
1005
1006 case SIOCSIFCAP:
1007 case SIOCSIFMTU:
1008 /* Do not allow the MTU or caps to be directly changed */
1009 error = EINVAL;
1010 break;
1011
1012 default:
1013 error = ether_ioctl(ifp, cmd, data);
1014 break;
1015 }
1016 return (error);
1017 }
1018
1019 static int
1020 lagg_ether_setmulti(struct lagg_softc *sc)
1021 {
1022 struct ifnet *scifp = sc->sc_ifp;
1023 struct ifnet *ifp;
1024 struct ifmultiaddr *ifma, *rifma = NULL;
1025 struct lagg_port *lp;
1026 struct lagg_mc *mc;
1027 struct sockaddr_dl sdl;
1028 int error;
1029
1030 LAGG_WLOCK_ASSERT(sc);
1031
1032 bzero((char *)&sdl, sizeof(sdl));
1033 sdl.sdl_len = sizeof(sdl);
1034 sdl.sdl_family = AF_LINK;
1035 sdl.sdl_type = IFT_ETHER;
1036 sdl.sdl_alen = ETHER_ADDR_LEN;
1037
1038 /* First, remove any existing filter entries. */
1039 lagg_ether_purgemulti(sc);
1040
1041 /* Now program new ones. */
1042 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1043 if (ifma->ifma_addr->sa_family != AF_LINK)
1044 continue;
1045 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1046 if (mc == NULL)
1047 return (ENOMEM);
1048 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1049 (char *)&mc->mc_addr, ETHER_ADDR_LEN);
1050 SLIST_INSERT_HEAD(&sc->sc_mc_head, mc, mc_entries);
1051 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1052 LLADDR(&sdl), ETHER_ADDR_LEN);
1053
1054 /* do all the ports */
1055 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1056 ifp = lp->lp_ifp;
1057 sdl.sdl_index = ifp->if_index;
1058 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1059 if (error)
1060 return (error);
1061 }
1062 }
1063 return (0);
1064 }
1065
1066 static int
1067 lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1068 {
1069 struct lagg_softc *sc = lp->lp_softc;
1070 struct ifnet *ifp = lp->lp_ifp;
1071 struct lagg_mc *mc;
1072 struct ifmultiaddr *rifma = NULL;
1073 struct sockaddr_dl sdl;
1074 int error;
1075
1076 LAGG_WLOCK_ASSERT(sc);
1077
1078 bzero((char *)&sdl, sizeof(sdl));
1079 sdl.sdl_len = sizeof(sdl);
1080 sdl.sdl_family = AF_LINK;
1081 sdl.sdl_type = IFT_ETHER;
1082 sdl.sdl_alen = ETHER_ADDR_LEN;
1083 sdl.sdl_index = ifp->if_index;
1084
1085 SLIST_FOREACH(mc, &sc->sc_mc_head, mc_entries) {
1086 bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
1087
1088 if (set)
1089 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1090 else
1091 error = if_delmulti(ifp, (struct sockaddr *)&sdl);
1092
1093 if (error) {
1094 printf("cmdmulti error on %s, set = %d\n",
1095 ifp->if_xname, set);
1096 return (error);
1097 }
1098 }
1099 return (0);
1100 }
1101
1102 static void
1103 lagg_ether_purgemulti(struct lagg_softc *sc)
1104 {
1105 struct lagg_port *lp;
1106 struct lagg_mc *mc;
1107
1108 LAGG_WLOCK_ASSERT(sc);
1109
1110 /* remove from ports */
1111 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1112 lagg_ether_cmdmulti(lp, 0);
1113
1114 while ((mc = SLIST_FIRST(&sc->sc_mc_head)) != NULL) {
1115 SLIST_REMOVE(&sc->sc_mc_head, mc, lagg_mc, mc_entries);
1116 free(mc, M_DEVBUF);
1117 }
1118 }
1119
1120 /* Handle a ref counted flag that should be set on the lagg port as well */
1121 static int
1122 lagg_setflag(struct lagg_port *lp, int flag, int status,
1123 int (*func)(struct ifnet *, int))
1124 {
1125 struct lagg_softc *sc = lp->lp_softc;
1126 struct ifnet *scifp = sc->sc_ifp;
1127 struct ifnet *ifp = lp->lp_ifp;
1128 int error;
1129
1130 LAGG_WLOCK_ASSERT(sc);
1131
1132 status = status ? (scifp->if_flags & flag) : 0;
1133 /* Now "status" contains the flag value or 0 */
1134
1135 /*
1136 * See if the recorded port status differs from what we
1137 * want it to be. If it does, flip it. We record the port
1138 * status in lp_ifflags so that we won't clear a port flag
1139 * we haven't set. In fact, we don't clear or set port
1140 * flags directly, but get or release references to them.
1141 * That's why we can be sure that the recorded flags still
1142 * agree with the actual port flags.
1143 */
1144 if (status != (lp->lp_ifflags & flag)) {
1145 error = (*func)(ifp, status);
1146 if (error)
1147 return (error);
1148 lp->lp_ifflags &= ~flag;
1149 lp->lp_ifflags |= status;
1150 }
1151 return (0);
1152 }
1153
1154 /*
1155 * Handle IFF_* flags that require certain changes on the lagg port:
1156 * if "status" is true, update the port's flags to match the lagg;
1157 * if "status" is false, forcibly clear the flags set on the port.
1158 */
1159 static int
1160 lagg_setflags(struct lagg_port *lp, int status)
1161 {
1162 int error, i;
1163
1164 for (i = 0; lagg_pflags[i].flag; i++) {
1165 error = lagg_setflag(lp, lagg_pflags[i].flag,
1166 status, lagg_pflags[i].func);
1167 if (error)
1168 return (error);
1169 }
1170 return (0);
1171 }
1172
1173 static void
1174 lagg_start(struct ifnet *ifp)
1175 {
1176 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1177 struct mbuf *m;
1178 int error = 0;
1179
1180 LAGG_RLOCK(sc);
1181 for (;; error = 0) {
1182 IFQ_DEQUEUE(&ifp->if_snd, m);
1183 if (m == NULL)
1184 break;
1185
1186 ETHER_BPF_MTAP(ifp, m);
1187
1188 /* We need a Tx algorithm and at least one port */
1189 if (sc->sc_proto != LAGG_PROTO_NONE && sc->sc_count)
1190 error = (*sc->sc_start)(sc, m);
1191 else
1192 m_freem(m);
1193
1194 if (error == 0)
1195 ifp->if_opackets++;
1196 else
1197 ifp->if_oerrors++;
1198 }
1199 LAGG_RUNLOCK(sc);
1200
1201 return;
1202 }
1203
1204 static struct mbuf *
1205 lagg_input(struct ifnet *ifp, struct mbuf *m)
1206 {
1207 struct lagg_port *lp = ifp->if_lagg;
1208 struct lagg_softc *sc = lp->lp_softc;
1209 struct ifnet *scifp = sc->sc_ifp;
1210
1211 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1212 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1213 sc->sc_proto == LAGG_PROTO_NONE) {
1214 m_freem(m);
1215 return (NULL);
1216 }
1217
1218 LAGG_RLOCK(sc);
1219 ETHER_BPF_MTAP(scifp, m);
1220
1221 m = (*sc->sc_input)(sc, lp, m);
1222
1223 if (m != NULL) {
1224 scifp->if_ipackets++;
1225 scifp->if_ibytes += m->m_pkthdr.len;
1226
1227 if (scifp->if_flags & IFF_MONITOR) {
1228 m_freem(m);
1229 m = NULL;
1230 }
1231 }
1232
1233 LAGG_RUNLOCK(sc);
1234 return (m);
1235 }
1236
1237 static int
1238 lagg_media_change(struct ifnet *ifp)
1239 {
1240 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1241
1242 if (sc->sc_ifflags & IFF_DEBUG)
1243 printf("%s\n", __func__);
1244
1245 /* Ignore */
1246 return (0);
1247 }
1248
1249 static void
1250 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1251 {
1252 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1253 struct lagg_port *lp;
1254
1255 imr->ifm_status = IFM_AVALID;
1256 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1257
1258 LAGG_RLOCK(sc);
1259 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1260 if (LAGG_PORTACTIVE(lp))
1261 imr->ifm_status |= IFM_ACTIVE;
1262 }
1263 LAGG_RUNLOCK(sc);
1264 }
1265
1266 static void
1267 lagg_linkstate(struct lagg_softc *sc)
1268 {
1269 struct lagg_port *lp;
1270 int new_link = LINK_STATE_DOWN;
1271
1272 /* Our link is considered up if at least one of our ports is active */
1273 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1274 if (lp->lp_link_state == LINK_STATE_UP) {
1275 new_link = LINK_STATE_UP;
1276 break;
1277 }
1278 }
1279 if_link_state_change(sc->sc_ifp, new_link);
1280 }
1281
1282 static void
1283 lagg_port_state(struct ifnet *ifp, int state)
1284 {
1285 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1286 struct lagg_softc *sc = NULL;
1287
1288 if (lp != NULL)
1289 sc = lp->lp_softc;
1290 if (sc == NULL)
1291 return;
1292
1293 LAGG_WLOCK(sc);
1294 lagg_linkstate(sc);
1295 if (sc->sc_linkstate != NULL)
1296 (*sc->sc_linkstate)(lp);
1297 LAGG_WUNLOCK(sc);
1298 }
1299
1300 struct lagg_port *
1301 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1302 {
1303 struct lagg_port *lp_next, *rval = NULL;
1304 // int new_link = LINK_STATE_DOWN;
1305
1306 LAGG_RLOCK_ASSERT(sc);
1307 /*
1308 * Search for a port that reports an active link state.
1309 */
1310
1311 if (lp == NULL)
1312 goto search;
1313 if (LAGG_PORTACTIVE(lp)) {
1314 rval = lp;
1315 goto found;
1316 }
1317 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1318 LAGG_PORTACTIVE(lp_next)) {
1319 rval = lp_next;
1320 goto found;
1321 }
1322
1323 search:
1324 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1325 if (LAGG_PORTACTIVE(lp_next)) {
1326 rval = lp_next;
1327 goto found;
1328 }
1329 }
1330
1331 found:
1332 if (rval != NULL) {
1333 /*
1334 * The IEEE 802.1D standard assumes that a lagg with
1335 * multiple ports is always full duplex. This is valid
1336 * for load sharing laggs and if at least two links
1337 * are active. Unfortunately, checking the latter would
1338 * be too expensive at this point.
1339 XXX
1340 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1341 (sc->sc_count > 1))
1342 new_link = LINK_STATE_FULL_DUPLEX;
1343 else
1344 new_link = rval->lp_link_state;
1345 */
1346 }
1347
1348 return (rval);
1349 }
1350
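/*
 * Return a pointer to len bytes starting at offset off in the mbuf chain.
 * If those bytes are not contiguous in the first mbuf they are copied
 * into buf and buf is returned; NULL is returned if the packet is too
 * short to contain them.
 */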
1351 static const void *
1352 lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1353 {
1354 if (m->m_pkthdr.len < (off + len)) {
1355 return (NULL);
1356 } else if (m->m_len < (off + len)) {
1357 m_copydata(m, off, len, buf);
1358 return (buf);
1359 }
1360 return (mtod(m, char *) + off);
1361 }
1362
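/*
 * Compute a 32-bit flow hash over the Ethernet source and destination
 * addresses, any VLAN tag and, for IP traffic, the IPv4 addresses or the
 * IPv6 addresses plus flow label.  The loadbalance transmit path uses
 * this value, modulo the port count, to select an egress port; the LACP
 * code uses it in the same way.
 */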
1363 uint32_t
1364 lagg_hashmbuf(struct mbuf *m, uint32_t key)
1365 {
1366 uint16_t etype;
1367 uint32_t p = 0;
1368 int off;
1369 struct ether_header *eh;
1370 struct ether_vlan_header vlanbuf;
1371 const struct ether_vlan_header *vlan;
1372 struct m_tag *mtag;
1373 u_int tag;
1374 #ifdef INET
1375 const struct ip *ip;
1376 struct ip ipbuf;
1377 #endif
1378 #ifdef INET6
1379 const struct ip6_hdr *ip6;
1380 struct ip6_hdr ip6buf;
1381 uint32_t flow;
1382 #endif
1383
1384 off = sizeof(*eh);
1385 if (m->m_len < off)
1386 goto out;
1387 eh = mtod(m, struct ether_header *);
1388 etype = ntohs(eh->ether_type);
1389 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
1390 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1391
1392 /* Special handling for encapsulating VLAN frames */
1393 if (m->m_flags & M_VLANTAG) {
1394 mtag = m_tag_locate(m, MTAG_VLAN, MTAG_VLAN_TAG, NULL);
1395 KASSERT(mtag != NULL,
1396 ("%s: M_VLANTAG without m_tag", __func__));
1397 tag = EVL_VLANOFTAG(VLAN_TAG_VALUE(mtag));
1398 p = hash32_buf(&tag, sizeof(tag), p);
1399 } else if (etype == ETHERTYPE_VLAN) {
1400 vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
1401 if (vlan == NULL)
1402 goto out;
1403
1404 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1405 etype = ntohs(vlan->evl_proto);
1406 off += sizeof(*vlan) - sizeof(*eh);
1407 }
1408
1409 switch (etype) {
1410 #ifdef INET
1411 case ETHERTYPE_IP:
1412 ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
1413 if (ip == NULL)
1414 goto out;
1415
1416 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1417 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1418 break;
1419 #endif
1420 #ifdef INET6
1421 case ETHERTYPE_IPV6:
1422 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
1423 if (ip6 == NULL)
1424 goto out;
1425
1426 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1427 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1428 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1429 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
1430 break;
1431 #endif
1432 }
1433 out:
1434 return (p);
1435 }
1436
1437 int
1438 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1439 {
1440 int error = 0;
1441
1442 IFQ_HANDOFF(ifp, m, error);
1443 if (error)
1444 ifp->if_oerrors++;
1445 return (error);
1446 }
1447
1448 /*
1449 * Simple round robin aggregation
1450 */
1451
1452 static int
1453 lagg_rr_attach(struct lagg_softc *sc)
1454 {
1455 sc->sc_detach = lagg_rr_detach;
1456 sc->sc_start = lagg_rr_start;
1457 sc->sc_input = lagg_rr_input;
1458 sc->sc_port_create = NULL;
1459 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1460 sc->sc_seq = 0;
1461
1462 return (0);
1463 }
1464
1465 static int
1466 lagg_rr_detach(struct lagg_softc *sc)
1467 {
1468 return (0);
1469 }
1470
1471 static int
1472 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1473 {
1474 struct lagg_port *lp;
1475 uint32_t p;
1476
1477 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1478 p %= sc->sc_count;
1479 lp = SLIST_FIRST(&sc->sc_ports);
1480 while (p--)
1481 lp = SLIST_NEXT(lp, lp_entries);
1482
1483 /*
1484 * Check the port's link state. This will return the next active
1485 * port if the link is down or the port is NULL.
1486 */
1487 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1488 m_freem(m);
1489 return (ENOENT);
1490 }
1491
1492 /* Send mbuf */
1493 return (lagg_enqueue(lp->lp_ifp, m));
1494 }
1495
1496 static struct mbuf *
1497 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1498 {
1499 struct ifnet *ifp = sc->sc_ifp;
1500
1501 /* Just pass in the packet to our lagg device */
1502 m->m_pkthdr.rcvif = ifp;
1503
1504 return (m);
1505 }
1506
1507 /*
1508 * Active failover
1509 */
1510
1511 static int
1512 lagg_fail_attach(struct lagg_softc *sc)
1513 {
1514 sc->sc_detach = lagg_fail_detach;
1515 sc->sc_start = lagg_fail_start;
1516 sc->sc_input = lagg_fail_input;
1517 sc->sc_port_create = NULL;
1518 sc->sc_port_destroy = NULL;
1519
1520 return (0);
1521 }
1522
1523 static int
1524 lagg_fail_detach(struct lagg_softc *sc)
1525 {
1526 return (0);
1527 }
1528
1529 static int
1530 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1531 {
1532 struct lagg_port *lp;
1533
1534 /* Use the master port if active or the next available port */
1535 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1536 m_freem(m);
1537 return (ENOENT);
1538 }
1539
1540 /* Send mbuf */
1541 return (lagg_enqueue(lp->lp_ifp, m));
1542 }
1543
1544 static struct mbuf *
1545 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1546 {
1547 struct ifnet *ifp = sc->sc_ifp;
1548 struct lagg_port *tmp_tp;
1549
1550 if (lp == sc->sc_primary) {
1551 m->m_pkthdr.rcvif = ifp;
1552 return (m);
1553 }
1554
1555 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1556 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1557 /*
1558 * If tmp_tp is null, we've received a packet when all
1559 * our links are down. Weird, but process it anyway.
1560 */
1561 if ((tmp_tp == NULL || tmp_tp == lp)) {
1562 m->m_pkthdr.rcvif = ifp;
1563 return (m);
1564 }
1565 }
1566
1567 m_freem(m);
1568 return (NULL);
1569 }
1570
1571 /*
1572 * Loadbalancing
1573 */
1574
1575 static int
1576 lagg_lb_attach(struct lagg_softc *sc)
1577 {
1578 struct lagg_port *lp;
1579 struct lagg_lb *lb;
1580
1581 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1582 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1583 return (ENOMEM);
1584
1585 sc->sc_detach = lagg_lb_detach;
1586 sc->sc_start = lagg_lb_start;
1587 sc->sc_input = lagg_lb_input;
1588 sc->sc_port_create = lagg_lb_port_create;
1589 sc->sc_port_destroy = lagg_lb_port_destroy;
1590 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1591
1592 lb->lb_key = arc4random();
1593 sc->sc_psc = (caddr_t)lb;
1594
1595 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1596 lagg_lb_port_create(lp);
1597
1598 return (0);
1599 }
1600
1601 static int
1602 lagg_lb_detach(struct lagg_softc *sc)
1603 {
1604 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1605 if (lb != NULL)
1606 free(lb, M_DEVBUF);
1607 return (0);
1608 }
1609
1610 static int
1611 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1612 {
1613 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1614 struct lagg_port *lp_next;
1615 int i = 0;
1616
1617 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1618 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1619 if (lp_next == lp)
1620 continue;
1621 if (i >= LAGG_MAX_PORTS)
1622 return (EINVAL);
1623 if (sc->sc_ifflags & IFF_DEBUG)
1624 printf("%s: port %s at index %d\n",
1625 sc->sc_ifname, lp_next->lp_ifname, i);
1626 lb->lb_ports[i++] = lp_next;
1627 }
1628
1629 return (0);
1630 }
1631
1632 static int
1633 lagg_lb_port_create(struct lagg_port *lp)
1634 {
1635 struct lagg_softc *sc = lp->lp_softc;
1636 return (lagg_lb_porttable(sc, NULL));
1637 }
1638
1639 static void
1640 lagg_lb_port_destroy(struct lagg_port *lp)
1641 {
1642 struct lagg_softc *sc = lp->lp_softc;
1643 lagg_lb_porttable(sc, lp);
1644 }
1645
1646 static int
1647 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1648 {
1649 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1650 struct lagg_port *lp = NULL;
1651 uint32_t p = 0;
1652
1653 p = lagg_hashmbuf(m, lb->lb_key);
1654 p %= sc->sc_count;
1655 lp = lb->lb_ports[p];
1656
1657 /*
1658 * Check the port's link state. This will return the next active
1659 * port if the link is down or the port is NULL.
1660 */
1661 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1662 m_freem(m);
1663 return (ENOENT);
1664 }
1665
1666 /* Send mbuf */
1667 return (lagg_enqueue(lp->lp_ifp, m));
1668 }
1669
1670 static struct mbuf *
1671 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1672 {
1673 struct ifnet *ifp = sc->sc_ifp;
1674
1675 /* Just pass in the packet to our lagg device */
1676 m->m_pkthdr.rcvif = ifp;
1677
1678 return (m);
1679 }
1680
1681 /*
1682 * 802.3ad LACP
1683 */
1684
1685 static int
1686 lagg_lacp_attach(struct lagg_softc *sc)
1687 {
1688 struct lagg_port *lp;
1689 int error;
1690
1691 sc->sc_detach = lagg_lacp_detach;
1692 sc->sc_port_create = lacp_port_create;
1693 sc->sc_port_destroy = lacp_port_destroy;
1694 sc->sc_linkstate = lacp_linkstate;
1695 sc->sc_start = lagg_lacp_start;
1696 sc->sc_input = lagg_lacp_input;
1697 sc->sc_init = lacp_init;
1698 sc->sc_stop = lacp_stop;
1699 sc->sc_lladdr = lagg_lacp_lladdr;
1700 sc->sc_req = lacp_req;
1701 sc->sc_portreq = lacp_portreq;
1702
1703 error = lacp_attach(sc);
1704 if (error)
1705 return (error);
1706
1707 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1708 lacp_port_create(lp);
1709
1710 return (error);
1711 }
1712
1713 static int
1714 lagg_lacp_detach(struct lagg_softc *sc)
1715 {
1716 struct lagg_port *lp;
1717 int error;
1718
1719 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1720 lacp_port_destroy(lp);
1721
1722 /* unlocking is safe here */
1723 LAGG_WUNLOCK(sc);
1724 error = lacp_detach(sc);
1725 LAGG_WLOCK(sc);
1726
1727 return (error);
1728 }
1729
1730 static void
1731 lagg_lacp_lladdr(struct lagg_softc *sc)
1732 {
1733 struct lagg_port *lp;
1734
1735 /* purge all the lacp ports */
1736 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1737 lacp_port_destroy(lp);
1738
1739 /* add them back in */
1740 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1741 lacp_port_create(lp);
1742 }
1743
1744 static int
1745 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1746 {
1747 struct lagg_port *lp;
1748
1749 lp = lacp_select_tx_port(sc, m);
1750 if (lp == NULL) {
1751 m_freem(m);
1752 return (EBUSY);
1753 }
1754
1755 /* Send mbuf */
1756 return (lagg_enqueue(lp->lp_ifp, m));
1757 }
1758
1759 static struct mbuf *
1760 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1761 {
1762 struct ifnet *ifp = sc->sc_ifp;
1763 struct ether_header *eh;
1764 u_short etype;
1765
1766 eh = mtod(m, struct ether_header *);
1767 etype = ntohs(eh->ether_type);
1768
1769 /* Tap off LACP control messages */
1770 if (etype == ETHERTYPE_SLOW) {
1771 m = lacp_input(lp, m);
1772 if (m == NULL)
1773 return (NULL);
1774 }
1775
1776 /*
1777 * If the port is not collecting or not in the active aggregator then
1778 * free and return.
1779 */
1780 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1781 m_freem(m);
1782 return (NULL);
1783 }
1784
1785 m->m_pkthdr.rcvif = ifp;
1786 return (m);
1787 }
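
The SIOCSLAGG and SIOCSLAGGPORT handlers in lagg_ioctl() above are normally driven from userland by ifconfig(8). Below is a minimal, hypothetical sketch of that usage, assuming the lagg0 interface has already been created through the cloner registered above, that em0 is an example port to attach, and that struct lagg_reqall carries the lagg interface name in ra_ifname just as struct lagg_reqport carries it in rp_ifname (as filled in by lagg_port2req()); both requests need superuser privileges per the suser() checks.

/* Hypothetical userland sketch; not part of if_lagg.c. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_lagg.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct lagg_reqall ra;
	struct lagg_reqport rp;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	/* Select the aggregation protocol; handled by SIOCSLAGG above. */
	memset(&ra, 0, sizeof(ra));
	strlcpy(ra.ra_ifname, "lagg0", sizeof(ra.ra_ifname));	/* assumed field name */
	ra.ra_proto = LAGG_PROTO_LACP;
	if (ioctl(s, SIOCSLAGG, &ra) == -1)
		err(1, "SIOCSLAGG");

	/* Attach a physical port; handled by SIOCSLAGGPORT above. */
	memset(&rp, 0, sizeof(rp));
	strlcpy(rp.rp_ifname, "lagg0", sizeof(rp.rp_ifname));
	strlcpy(rp.rp_portname, "em0", sizeof(rp.rp_portname));	/* example port */
	if (ioctl(s, SIOCSLAGGPORT, &rp) == -1)
		err(1, "SIOCSLAGGPORT");

	close(s);
	return (0);
}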