sys/net/if_lagg.c
1 /* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD: releng/7.4/sys/net/if_lagg.c 214112 2010-10-20 17:58:12Z delphij $");
22
23 #include "opt_inet.h"
24 #include "opt_inet6.h"
25
26 #include <sys/param.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/queue.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/module.h>
35 #include <sys/priv.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <sys/hash.h>
39 #include <sys/lock.h>
40 #include <sys/rwlock.h>
41 #include <sys/taskqueue.h>
42
43 #include <net/ethernet.h>
44 #include <net/if.h>
45 #include <net/if_clone.h>
46 #include <net/if_arp.h>
47 #include <net/if_dl.h>
48 #include <net/if_llc.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net/bpf.h>
53
54 #ifdef INET
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/if_ether.h>
58 #include <netinet/ip.h>
59 #endif
60
61 #ifdef INET6
62 #include <netinet/ip6.h>
63 #endif
64
65 #include <net/if_vlan_var.h>
66 #include <net/if_lagg.h>
67 #include <net/ieee8023ad_lacp.h>
68
69 /* Special flags we should propagate to the lagg ports. */
70 static struct {
71 int flag;
72 int (*func)(struct ifnet *, int);
73 } lagg_pflags[] = {
74 {IFF_PROMISC, ifpromisc},
75 {IFF_ALLMULTI, if_allmulti},
76 {0, NULL}
77 };
78
79 SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
80 static struct mtx lagg_list_mtx;
81 eventhandler_tag lagg_detach_cookie = NULL;
82
83 static int lagg_clone_create(struct if_clone *, int, caddr_t);
84 static void lagg_clone_destroy(struct ifnet *);
85 static void lagg_lladdr(struct lagg_softc *, uint8_t *);
86 static void lagg_capabilities(struct lagg_softc *);
87 static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
88 static void lagg_port_setlladdr(void *, int);
89 static int lagg_port_create(struct lagg_softc *, struct ifnet *);
90 static int lagg_port_destroy(struct lagg_port *, int);
91 static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
92 static void lagg_linkstate(struct lagg_softc *);
93 static void lagg_port_state(struct ifnet *, int);
94 static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
95 static int lagg_port_output(struct ifnet *, struct mbuf *,
96 struct sockaddr *, struct rtentry *);
97 static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
98 static int lagg_port_checkstacking(struct lagg_softc *);
99 static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
100 static void lagg_init(void *);
101 static void lagg_stop(struct lagg_softc *);
102 static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
103 static int lagg_ether_setmulti(struct lagg_softc *);
104 static int lagg_ether_cmdmulti(struct lagg_port *, int);
105 static int lagg_setflag(struct lagg_port *, int, int,
106 int (*func)(struct ifnet *, int));
107 static int lagg_setflags(struct lagg_port *, int status);
108 static void lagg_start(struct ifnet *);
109 static int lagg_media_change(struct ifnet *);
110 static void lagg_media_status(struct ifnet *, struct ifmediareq *);
111 static struct lagg_port *lagg_link_active(struct lagg_softc *,
112 struct lagg_port *);
113 static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
114
115 IFC_SIMPLE_DECLARE(lagg, 0);
116
117 /* Simple round robin */
118 static int lagg_rr_attach(struct lagg_softc *);
119 static int lagg_rr_detach(struct lagg_softc *);
120 static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
121 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
122 struct mbuf *);
123
124 /* Active failover */
125 static int lagg_fail_attach(struct lagg_softc *);
126 static int lagg_fail_detach(struct lagg_softc *);
127 static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
128 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
129 struct mbuf *);
130
131 /* Loadbalancing */
132 static int lagg_lb_attach(struct lagg_softc *);
133 static int lagg_lb_detach(struct lagg_softc *);
134 static int lagg_lb_port_create(struct lagg_port *);
135 static void lagg_lb_port_destroy(struct lagg_port *);
136 static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
137 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
138 struct mbuf *);
139 static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
140
141 /* 802.3ad LACP */
142 static int lagg_lacp_attach(struct lagg_softc *);
143 static int lagg_lacp_detach(struct lagg_softc *);
144 static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
145 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
146 struct mbuf *);
147 static void lagg_lacp_lladdr(struct lagg_softc *);
148
149 /* lagg protocol table */
150 static const struct {
151 int ti_proto;
152 int (*ti_attach)(struct lagg_softc *);
153 } lagg_protos[] = {
154 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
155 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
156 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
157 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
158 { LAGG_PROTO_LACP, lagg_lacp_attach },
159 { LAGG_PROTO_NONE, NULL }
160 };
161
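/*
 * Selection sketch (the ifconfig option names here are an assumption
 * of this note, not taken from this file): the "laggproto" argument
 * chooses the row above whose ti_attach hook installs the protocol's
 * start/input handlers, e.g.
 *
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport em0 laggport em1 up
 */
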
162 SYSCTL_DECL(_net_link);
163 SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0, "Link Aggregation");
164
165 static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
166 SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
167 &lagg_failover_rx_all, 0,
168 "Accept input from any interface in a failover lagg");
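
/*
 * Runtime usage sketch: the OID follows from the SYSCTL_NODE above
 * plus this leaf, so the knob can be flipped with
 *
 *	sysctl net.link.lagg.failover_rx_all=1
 *
 * after which lagg_fail_input() accepts frames on any member port,
 * not only the active one.
 */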
169
170 static int
171 lagg_modevent(module_t mod, int type, void *data)
172 {
173
174 switch (type) {
175 case MOD_LOAD:
176 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
177 SLIST_INIT(&lagg_list);
178 if_clone_attach(&lagg_cloner);
179 lagg_input_p = lagg_input;
180 lagg_linkstate_p = lagg_port_state;
181 lagg_detach_cookie = EVENTHANDLER_REGISTER(
182 ifnet_departure_event, lagg_port_ifdetach, NULL,
183 EVENTHANDLER_PRI_ANY);
184 break;
185 case MOD_UNLOAD:
186 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
187 lagg_detach_cookie);
188 if_clone_detach(&lagg_cloner);
189 lagg_input_p = NULL;
190 lagg_linkstate_p = NULL;
191 mtx_destroy(&lagg_list_mtx);
192 break;
193 default:
194 return (EOPNOTSUPP);
195 }
196 return (0);
197 }
198
199 static moduledata_t lagg_mod = {
200 "if_lagg",
201 lagg_modevent,
202 0
203 };
204
205 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
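
/*
 * Loading sketch: the module registers at SI_SUB_PSEUDO and may be
 * loaded at runtime, after which interfaces can be cloned:
 *
 *	kldload if_lagg
 *	ifconfig lagg0 create
 */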
206
207 static int
208 lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
209 {
210 struct lagg_softc *sc;
211 struct ifnet *ifp;
212 int i, error = 0;
213 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
214
215 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
216 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
217 if (ifp == NULL) {
218 free(sc, M_DEVBUF);
219 return (ENOSPC);
220 }
221
222 sc->sc_proto = LAGG_PROTO_NONE;
223 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
224 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
225 sc->sc_proto = lagg_protos[i].ti_proto;
226 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
227 if_free_type(ifp, IFT_ETHER);
228 free(sc, M_DEVBUF);
229 return (error);
230 }
231 break;
232 }
233 }
234 LAGG_LOCK_INIT(sc);
235 SLIST_INIT(&sc->sc_ports);
236 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
237
238 /* Initialise pseudo media types */
239 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
240 lagg_media_status);
241 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
242 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
243
244 if_initname(ifp, ifc->ifc_name, unit);
245 ifp->if_type = IFT_ETHER;
246 ifp->if_softc = sc;
247 ifp->if_start = lagg_start;
248 ifp->if_init = lagg_init;
249 ifp->if_ioctl = lagg_ioctl;
250 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
251
252 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
253 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
254 IFQ_SET_READY(&ifp->if_snd);
255
256 /*
257 * Attach as an ordinary ethernet device, children will be attached
258 * as special device IFT_IEEE8023ADLAG.
259 */
260 ether_ifattach(ifp, eaddr);
261
262 /* Insert into the global list of laggs */
263 mtx_lock(&lagg_list_mtx);
264 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
265 mtx_unlock(&lagg_list_mtx);
266
267 return (0);
268 }
269
270 static void
271 lagg_clone_destroy(struct ifnet *ifp)
272 {
273 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
274 struct lagg_port *lp;
275
276 LAGG_WLOCK(sc);
277
278 lagg_stop(sc);
279 ifp->if_flags &= ~IFF_UP;
280
281 /* Shutdown and remove lagg ports */
282 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
283 lagg_port_destroy(lp, 1);
284 /* Unhook the aggregation protocol */
285 (*sc->sc_detach)(sc);
286
287 LAGG_WUNLOCK(sc);
288
289 ifmedia_removeall(&sc->sc_media);
290 ether_ifdetach(ifp);
291 if_free_type(ifp, IFT_ETHER);
292
293 mtx_lock(&lagg_list_mtx);
294 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
295 mtx_unlock(&lagg_list_mtx);
296
297 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
298 LAGG_LOCK_DESTROY(sc);
299 free(sc, M_DEVBUF);
300 }
301
302 static void
303 lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
304 {
305 struct ifnet *ifp = sc->sc_ifp;
306
307 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
308 return;
309
310 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
311 /* Let the protocol know the MAC has changed */
312 if (sc->sc_lladdr != NULL)
313 (*sc->sc_lladdr)(sc);
314 }
315
316 static void
317 lagg_capabilities(struct lagg_softc *sc)
318 {
319 struct lagg_port *lp;
320 int cap = ~0, ena = ~0;
321
322 LAGG_WLOCK_ASSERT(sc);
323
324 /* Get capabilities from the lagg ports */
325 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
326 cap &= lp->lp_ifp->if_capabilities;
327 ena &= lp->lp_ifp->if_capenable;
328 }
329 cap = (cap == ~0 ? 0 : cap);
330 ena = (ena == ~0 ? 0 : ena);
331
332 if (sc->sc_ifp->if_capabilities != cap ||
333 sc->sc_ifp->if_capenable != ena) {
334 sc->sc_ifp->if_capabilities = cap;
335 sc->sc_ifp->if_capenable = ena;
336 getmicrotime(&sc->sc_ifp->if_lastchange);
337
338 if (sc->sc_ifflags & IFF_DEBUG)
339 if_printf(sc->sc_ifp,
340 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
341 }
342 }
343
344 static void
345 lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
346 {
347 struct lagg_softc *sc = lp->lp_softc;
348 struct ifnet *ifp = lp->lp_ifp;
349 struct lagg_llq *llq;
350 int pending = 0;
351
352 LAGG_WLOCK_ASSERT(sc);
353
354 if (lp->lp_detaching ||
355 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
356 return;
357
358 /* Check to make sure it's not already queued to be changed */
359 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
360 if (llq->llq_ifp == ifp) {
361 pending = 1;
362 break;
363 }
364 }
365
366 if (!pending) {
367 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
368 if (llq == NULL) /* XXX what to do */
369 return;
370 }
371
372 /* Update the lladdr even if pending; it may have changed */
373 llq->llq_ifp = ifp;
374 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
375
376 if (!pending)
377 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
378
379 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
380 }
381
382 /*
383 * Set the interface MAC address from a taskqueue to avoid a LOR.
384 */
385 static void
386 lagg_port_setlladdr(void *arg, int pending)
387 {
388 struct lagg_softc *sc = (struct lagg_softc *)arg;
389 struct lagg_llq *llq, *head;
390 struct ifnet *ifp;
391 int error;
392
393 /* Grab a local reference of the queue and remove it from the softc */
394 LAGG_WLOCK(sc);
395 head = SLIST_FIRST(&sc->sc_llq_head);
396 SLIST_FIRST(&sc->sc_llq_head) = NULL;
397 LAGG_WUNLOCK(sc);
398
399 /*
400 * Traverse the queue and set the lladdr on each ifp. It is safe to do
401 * unlocked as we have the only reference to it.
402 */
403 for (llq = head; llq != NULL; llq = head) {
404 ifp = llq->llq_ifp;
405
406 /* Set the link layer address */
407 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
408 if (error)
409 printf("%s: setlladdr failed on %s\n", __func__,
410 ifp->if_xname);
411
412 head = SLIST_NEXT(llq, llq_entries);
413 free(llq, M_DEVBUF);
414 }
415 }
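
/*
 * The function above uses a common drain idiom: steal the whole SLIST
 * under the lock, then walk it unlocked, which keeps if_setlladdr()
 * out of the lock's scope.  A condensed sketch with hypothetical
 * LOCK/UNLOCK/process placeholders:
 *
 *	LOCK(sc);
 *	head = SLIST_FIRST(&sc->queue);
 *	SLIST_FIRST(&sc->queue) = NULL;
 *	UNLOCK(sc);
 *	for (item = head; item != NULL; item = next) {
 *		next = SLIST_NEXT(item, entries);
 *		process(item);
 *		free(item, M_DEVBUF);
 *	}
 */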
416
417 static int
418 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
419 {
420 struct lagg_softc *sc_ptr;
421 struct lagg_port *lp;
422 int error = 0;
423
424 LAGG_WLOCK_ASSERT(sc);
425
426 /* Limit the maximum number of lagg ports */
427 if (sc->sc_count >= LAGG_MAX_PORTS)
428 return (ENOSPC);
429
430 /* Check if port has already been associated to a lagg */
431 if (ifp->if_lagg != NULL)
432 return (EBUSY);
433
434 /* XXX Disallow non-ethernet interfaces (this should accept any IEEE 802 type) */
435 if (ifp->if_type != IFT_ETHER)
436 return (EPROTONOSUPPORT);
437
438 /* Allow the first Ethernet member to define the MTU */
439 if (SLIST_EMPTY(&sc->sc_ports))
440 sc->sc_ifp->if_mtu = ifp->if_mtu;
441 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
442 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
443 ifp->if_xname);
444 return (EINVAL);
445 }
446
447 if ((lp = malloc(sizeof(struct lagg_port),
448 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
449 return (ENOMEM);
450
451 /* Check if port is a stacked lagg */
452 mtx_lock(&lagg_list_mtx);
453 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
454 if (ifp == sc_ptr->sc_ifp) {
455 mtx_unlock(&lagg_list_mtx);
456 free(lp, M_DEVBUF);
457 return (EINVAL);
458 /* XXX disable stacking for the moment, it's untested
459 lp->lp_flags |= LAGG_PORT_STACK;
460 if (lagg_port_checkstacking(sc_ptr) >=
461 LAGG_MAX_STACKING) {
462 mtx_unlock(&lagg_list_mtx);
463 free(lp, M_DEVBUF);
464 return (E2BIG);
465 }
466 */
467 }
468 }
469 mtx_unlock(&lagg_list_mtx);
470
471 /* Change the interface type */
472 lp->lp_iftype = ifp->if_type;
473 ifp->if_type = IFT_IEEE8023ADLAG;
474 ifp->if_lagg = lp;
475 lp->lp_ioctl = ifp->if_ioctl;
476 ifp->if_ioctl = lagg_port_ioctl;
477 lp->lp_output = ifp->if_output;
478 ifp->if_output = lagg_port_output;
479
480 lp->lp_ifp = ifp;
481 lp->lp_softc = sc;
482
483 /* Save port link layer address */
484 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
485
486 if (SLIST_EMPTY(&sc->sc_ports)) {
487 sc->sc_primary = lp;
488 lagg_lladdr(sc, IF_LLADDR(ifp));
489 } else {
490 /* Update link layer address for this port */
491 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
492 }
493
494 /* Insert into the list of ports */
495 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
496 sc->sc_count++;
497
498 /* Update lagg capabilities */
499 lagg_capabilities(sc);
500 lagg_linkstate(sc);
501
502 /* Add multicast addresses and interface flags to this port */
503 lagg_ether_cmdmulti(lp, 1);
504 lagg_setflags(lp, 1);
505
506 if (sc->sc_port_create != NULL)
507 error = (*sc->sc_port_create)(lp);
508 if (error) {
509 /* remove the port again, without calling sc_port_destroy */
510 lagg_port_destroy(lp, 0);
511 return (error);
512 }
513
514 return (error);
515 }
516
517 static int
518 lagg_port_checkstacking(struct lagg_softc *sc)
519 {
520 struct lagg_softc *sc_ptr;
521 struct lagg_port *lp;
522 int m = 0;
523
524 LAGG_WLOCK_ASSERT(sc);
525
526 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
527 if (lp->lp_flags & LAGG_PORT_STACK) {
528 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
529 m = MAX(m, lagg_port_checkstacking(sc_ptr));
530 }
531 }
532
533 return (m + 1);
534 }
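
/*
 * Worked example of the depth count above: if lagg0 had member lagg1
 * whose own members are plain ethernet ports, the recursion on lagg1
 * finds no LAGG_PORT_STACK ports (m == 0) and returns 1, so the call
 * for lagg0 returns 2.  The (currently disabled) stacking path in
 * lagg_port_create() would refuse the port once this reached
 * LAGG_MAX_STACKING.
 */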
535
536 static int
537 lagg_port_destroy(struct lagg_port *lp, int runpd)
538 {
539 struct lagg_softc *sc = lp->lp_softc;
540 struct lagg_port *lp_ptr;
541 struct lagg_llq *llq;
542 struct ifnet *ifp = lp->lp_ifp;
543
544 LAGG_WLOCK_ASSERT(sc);
545
546 if (runpd && sc->sc_port_destroy != NULL)
547 (*sc->sc_port_destroy)(lp);
548
549 /*
550 * Remove multicast addresses and interface flags from this port and
551 * reset the MAC address, skip if the interface is being detached.
552 */
553 if (!lp->lp_detaching) {
554 lagg_ether_cmdmulti(lp, 0);
555 lagg_setflags(lp, 0);
556 lagg_port_lladdr(lp, lp->lp_lladdr);
557 }
558
559 /* Restore interface */
560 ifp->if_type = lp->lp_iftype;
561 ifp->if_ioctl = lp->lp_ioctl;
562 ifp->if_output = lp->lp_output;
563 ifp->if_lagg = NULL;
564
565 /* Finally, remove the port from the lagg */
566 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
567 sc->sc_count--;
568
569 /* Update the primary interface */
570 if (lp == sc->sc_primary) {
571 uint8_t lladdr[ETHER_ADDR_LEN];
572
573 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
574 bzero(&lladdr, ETHER_ADDR_LEN);
575 } else {
576 bcopy(lp_ptr->lp_lladdr,
577 lladdr, ETHER_ADDR_LEN);
578 }
579 lagg_lladdr(sc, lladdr);
580 sc->sc_primary = lp_ptr;
581
582 /* Update link layer address for each port */
583 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
584 lagg_port_lladdr(lp_ptr, lladdr);
585 }
586
587 /* Remove any pending lladdr changes from the queue */
588 if (lp->lp_detaching) {
589 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
590 if (llq->llq_ifp == ifp) {
591 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
592 llq_entries);
593 free(llq, M_DEVBUF);
594 break; /* Only appears once */
595 }
596 }
597 }
598
599 if (lp->lp_ifflags)
600 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
601
602 free(lp, M_DEVBUF);
603
604 /* Update lagg capabilities */
605 lagg_capabilities(sc);
606 lagg_linkstate(sc);
607
608 return (0);
609 }
610
611 static int
612 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
613 {
614 struct lagg_reqport *rp = (struct lagg_reqport *)data;
615 struct lagg_softc *sc;
616 struct lagg_port *lp = NULL;
617 int error = 0;
618
619 /* Should be checked by the caller */
620 if (ifp->if_type != IFT_IEEE8023ADLAG ||
621 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
622 goto fallback;
623
624 switch (cmd) {
625 case SIOCGLAGGPORT:
626 if (rp->rp_portname[0] == '\0' ||
627 ifunit(rp->rp_portname) != ifp) {
628 error = EINVAL;
629 break;
630 }
631
632 LAGG_RLOCK(sc);
633 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
634 error = ENOENT;
635 LAGG_RUNLOCK(sc);
636 break;
637 }
638
639 lagg_port2req(lp, rp);
640 LAGG_RUNLOCK(sc);
641 break;
642
643 case SIOCSIFCAP:
644 if (lp->lp_ioctl == NULL) {
645 error = EINVAL;
646 break;
647 }
648 error = (*lp->lp_ioctl)(ifp, cmd, data);
649 if (error)
650 break;
651
652 /* Update lagg interface capabilities */
653 LAGG_WLOCK(sc);
654 lagg_capabilities(sc);
655 LAGG_WUNLOCK(sc);
656 break;
657
658 case SIOCSIFMTU:
659 /* Do not allow the MTU to be changed once joined */
660 error = EINVAL;
661 break;
662
663 default:
664 goto fallback;
665 }
666
667 return (error);
668
669 fallback:
670 if (lp != NULL && lp->lp_ioctl != NULL)
671 return ((*lp->lp_ioctl)(ifp, cmd, data));
672
673 return (EINVAL);
674 }
675
676 static int
677 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
678 struct sockaddr *dst, struct rtentry *rt0)
679 {
680 struct lagg_port *lp = ifp->if_lagg;
681 struct ether_header *eh;
682 short type = 0;
683
684 switch (dst->sa_family) {
685 case pseudo_AF_HDRCMPLT:
686 case AF_UNSPEC:
687 eh = (struct ether_header *)dst->sa_data;
688 type = eh->ether_type;
689 break;
690 }
691
692 /*
693 * Only allow ethernet types required to initiate or maintain the link,
694 * aggregated frames take a different path.
695 */
696 switch (ntohs(type)) {
697 case ETHERTYPE_PAE: /* EAPOL PAE/802.1x */
698 return ((*lp->lp_output)(ifp, m, dst, rt0));
699 }
700
701 /* drop any other frames */
702 m_freem(m);
703 return (EBUSY);
704 }
705
706 static void
707 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
708 {
709 struct lagg_port *lp;
710 struct lagg_softc *sc;
711
712 if ((lp = ifp->if_lagg) == NULL)
713 return;
714
715 sc = lp->lp_softc;
716
717 LAGG_WLOCK(sc);
718 lp->lp_detaching = 1;
719 lagg_port_destroy(lp, 1);
720 LAGG_WUNLOCK(sc);
721 }
722
723 static void
724 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
725 {
726 struct lagg_softc *sc = lp->lp_softc;
727
728 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
729 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
730 rp->rp_prio = lp->lp_prio;
731 rp->rp_flags = lp->lp_flags;
732 if (sc->sc_portreq != NULL)
733 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
734
735 /* Add protocol specific flags */
736 switch (sc->sc_proto) {
737 case LAGG_PROTO_FAILOVER:
738 if (lp == sc->sc_primary)
739 rp->rp_flags |= LAGG_PORT_MASTER;
740 if (lp == lagg_link_active(sc, sc->sc_primary))
741 rp->rp_flags |= LAGG_PORT_ACTIVE;
742 break;
743
744 case LAGG_PROTO_ROUNDROBIN:
745 case LAGG_PROTO_LOADBALANCE:
746 case LAGG_PROTO_ETHERCHANNEL:
747 if (LAGG_PORTACTIVE(lp))
748 rp->rp_flags |= LAGG_PORT_ACTIVE;
749 break;
750
751 case LAGG_PROTO_LACP:
752 /* LACP has a different definition of active */
753 if (lacp_isactive(lp))
754 rp->rp_flags |= LAGG_PORT_ACTIVE;
755 if (lacp_iscollecting(lp))
756 rp->rp_flags |= LAGG_PORT_COLLECTING;
757 if (lacp_isdistributing(lp))
758 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
759 break;
760 }
761
762 }
763
764 static void
765 lagg_init(void *xsc)
766 {
767 struct lagg_softc *sc = (struct lagg_softc *)xsc;
768 struct lagg_port *lp;
769 struct ifnet *ifp = sc->sc_ifp;
770
771 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
772 return;
773
774 LAGG_WLOCK(sc);
775
776 ifp->if_drv_flags |= IFF_DRV_RUNNING;
777 /* Update the port lladdrs */
778 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
779 lagg_port_lladdr(lp, IF_LLADDR(ifp));
780
781 if (sc->sc_init != NULL)
782 (*sc->sc_init)(sc);
783
784 LAGG_WUNLOCK(sc);
785 }
786
787 static void
788 lagg_stop(struct lagg_softc *sc)
789 {
790 struct ifnet *ifp = sc->sc_ifp;
791
792 LAGG_WLOCK_ASSERT(sc);
793
794 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
795 return;
796
797 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
798
799 if (sc->sc_stop != NULL)
800 (*sc->sc_stop)(sc);
801 }
802
803 static int
804 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
805 {
806 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
807 struct lagg_reqall *ra = (struct lagg_reqall *)data;
808 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
809 struct ifreq *ifr = (struct ifreq *)data;
810 struct lagg_port *lp;
811 struct ifnet *tpif;
812 struct thread *td = curthread;
813 char *buf, *outbuf;
814 int count, buflen, len, error = 0;
815
816 bzero(&rpbuf, sizeof(rpbuf));
817
818 switch (cmd) {
819 case SIOCGLAGG:
820 LAGG_RLOCK(sc);
821 count = 0;
822 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
823 count++;
824 buflen = count * sizeof(struct lagg_reqport);
825 LAGG_RUNLOCK(sc);
826
827 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
828
829 LAGG_RLOCK(sc);
830 ra->ra_proto = sc->sc_proto;
831 if (sc->sc_req != NULL)
832 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
833
834 count = 0;
835 buf = outbuf;
836 len = min(ra->ra_size, buflen);
837 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
838 if (len < sizeof(rpbuf))
839 break;
840
841 lagg_port2req(lp, &rpbuf);
842 memcpy(buf, &rpbuf, sizeof(rpbuf));
843 count++;
844 buf += sizeof(rpbuf);
845 len -= sizeof(rpbuf);
846 }
847 LAGG_RUNLOCK(sc);
848 ra->ra_ports = count;
849 ra->ra_size = count * sizeof(rpbuf);
850 error = copyout(outbuf, ra->ra_port, ra->ra_size);
851 free(outbuf, M_TEMP);
852 break;
853 case SIOCSLAGG:
854 error = priv_check(td, PRIV_NET_LAGG);
855 if (error)
856 break;
857 if (ra->ra_proto >= LAGG_PROTO_MAX) {
858 error = EPROTONOSUPPORT;
859 break;
860 }
861 if (sc->sc_proto != LAGG_PROTO_NONE) {
862 LAGG_WLOCK(sc);
863 error = sc->sc_detach(sc);
864 /* Reset protocol and pointers */
865 sc->sc_proto = LAGG_PROTO_NONE;
866 sc->sc_detach = NULL;
867 sc->sc_start = NULL;
868 sc->sc_input = NULL;
869 sc->sc_port_create = NULL;
870 sc->sc_port_destroy = NULL;
871 sc->sc_linkstate = NULL;
872 sc->sc_init = NULL;
873 sc->sc_stop = NULL;
874 sc->sc_lladdr = NULL;
875 sc->sc_req = NULL;
876 sc->sc_portreq = NULL;
877 LAGG_WUNLOCK(sc);
878 }
879 if (error != 0)
880 break;
881 for (int i = 0; i < (sizeof(lagg_protos) /
882 sizeof(lagg_protos[0])); i++) {
883 if (lagg_protos[i].ti_proto == ra->ra_proto) {
884 if (sc->sc_ifflags & IFF_DEBUG)
885 printf("%s: using proto %u\n",
886 sc->sc_ifname,
887 lagg_protos[i].ti_proto);
888 LAGG_WLOCK(sc);
889 sc->sc_proto = lagg_protos[i].ti_proto;
890 if (sc->sc_proto != LAGG_PROTO_NONE)
891 error = lagg_protos[i].ti_attach(sc);
892 LAGG_WUNLOCK(sc);
893 return (error);
894 }
895 }
896 error = EPROTONOSUPPORT;
897 break;
898 case SIOCGLAGGPORT:
899 if (rp->rp_portname[0] == '\0' ||
900 (tpif = ifunit(rp->rp_portname)) == NULL) {
901 error = EINVAL;
902 break;
903 }
904
905 LAGG_RLOCK(sc);
906 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
907 lp->lp_softc != sc) {
908 error = ENOENT;
909 LAGG_RUNLOCK(sc);
910 break;
911 }
912
913 lagg_port2req(lp, rp);
914 LAGG_RUNLOCK(sc);
915 break;
916 case SIOCSLAGGPORT:
917 error = priv_check(td, PRIV_NET_LAGG);
918 if (error)
919 break;
920 if (rp->rp_portname[0] == '\0' ||
921 (tpif = ifunit(rp->rp_portname)) == NULL) {
922 error = EINVAL;
923 break;
924 }
925 LAGG_WLOCK(sc);
926 error = lagg_port_create(sc, tpif);
927 LAGG_WUNLOCK(sc);
928 break;
929 case SIOCSLAGGDELPORT:
930 error = priv_check(td, PRIV_NET_LAGG);
931 if (error)
932 break;
933 if (rp->rp_portname[0] == '\0' ||
934 (tpif = ifunit(rp->rp_portname)) == NULL) {
935 error = EINVAL;
936 break;
937 }
938
939 LAGG_WLOCK(sc);
940 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
941 lp->lp_softc != sc) {
942 error = ENOENT;
943 LAGG_WUNLOCK(sc);
944 break;
945 }
946
947 error = lagg_port_destroy(lp, 1);
948 LAGG_WUNLOCK(sc);
949 break;
950 case SIOCSIFFLAGS:
951 /* Set flags on ports too */
952 LAGG_WLOCK(sc);
953 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
954 lagg_setflags(lp, 1);
955 }
956 LAGG_WUNLOCK(sc);
957
958 if (!(ifp->if_flags & IFF_UP) &&
959 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
960 /*
961 * If interface is marked down and it is running,
962 * then stop and disable it.
963 */
964 LAGG_WLOCK(sc);
965 lagg_stop(sc);
966 LAGG_WUNLOCK(sc);
967 } else if ((ifp->if_flags & IFF_UP) &&
968 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
969 /*
970 * If interface is marked up and it is stopped, then
971 * start it.
972 */
973 (*ifp->if_init)(sc);
974 }
975 break;
976 case SIOCADDMULTI:
977 case SIOCDELMULTI:
978 LAGG_WLOCK(sc);
979 error = lagg_ether_setmulti(sc);
980 LAGG_WUNLOCK(sc);
981 break;
982 case SIOCSIFMEDIA:
983 case SIOCGIFMEDIA:
984 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
985 break;
986
987 case SIOCSIFCAP:
988 case SIOCSIFMTU:
989 /* Do not allow the MTU or caps to be directly changed */
990 error = EINVAL;
991 break;
992
993 default:
994 error = ether_ioctl(ifp, cmd, data);
995 break;
996 }
997 return (error);
998 }
999
1000 static int
1001 lagg_ether_setmulti(struct lagg_softc *sc)
1002 {
1003 struct lagg_port *lp;
1004
1005 LAGG_WLOCK_ASSERT(sc);
1006
1007 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1008 /* First, remove any existing filter entries. */
1009 lagg_ether_cmdmulti(lp, 0);
1010 /* copy all addresses from the lagg interface to the port */
1011 lagg_ether_cmdmulti(lp, 1);
1012 }
1013 return (0);
1014 }
1015
1016 static int
1017 lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1018 {
1019 struct lagg_softc *sc = lp->lp_softc;
1020 struct ifnet *ifp = lp->lp_ifp;
1021 struct ifnet *scifp = sc->sc_ifp;
1022 struct lagg_mc *mc;
1023 struct ifmultiaddr *ifma, *rifma = NULL;
1024 struct sockaddr_dl sdl;
1025 int error;
1026
1027 LAGG_WLOCK_ASSERT(sc);
1028
1029 bzero((char *)&sdl, sizeof(sdl));
1030 sdl.sdl_len = sizeof(sdl);
1031 sdl.sdl_family = AF_LINK;
1032 sdl.sdl_type = IFT_ETHER;
1033 sdl.sdl_alen = ETHER_ADDR_LEN;
1034 sdl.sdl_index = ifp->if_index;
1035
1036 if (set) {
1037 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1038 if (ifma->ifma_addr->sa_family != AF_LINK)
1039 continue;
1040 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1041 LLADDR(&sdl), ETHER_ADDR_LEN);
1042
1043 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1044 if (error)
1045 return (error);
1046 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1047 if (mc == NULL)
1048 return (ENOMEM);
1049 mc->mc_ifma = rifma;
1050 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1051 }
1052 } else {
1053 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1054 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1055 if_delmulti_ifma(mc->mc_ifma);
1056 free(mc, M_DEVBUF);
1057 }
1058 }
1059 return (0);
1060 }
1061
1062 /* Handle a ref-counted flag that should be set on the lagg port as well */
1063 static int
1064 lagg_setflag(struct lagg_port *lp, int flag, int status,
1065 int (*func)(struct ifnet *, int))
1066 {
1067 struct lagg_softc *sc = lp->lp_softc;
1068 struct ifnet *scifp = sc->sc_ifp;
1069 struct ifnet *ifp = lp->lp_ifp;
1070 int error;
1071
1072 LAGG_WLOCK_ASSERT(sc);
1073
1074 status = status ? (scifp->if_flags & flag) : 0;
1075 /* Now "status" contains the flag value or 0 */
1076
1077 /*
1078 * See if the port's recorded status differs from what we
1079 * want it to be. If it does, flip it. We record the port's
1080 * status in lp_ifflags so that we won't clear a port flag
1081 * we haven't set. In fact, we don't clear or set port
1082 * flags directly; we acquire or release references to them.
1083 * That is why we can be sure the recorded flags still agree
1084 * with the actual port flags.
1085 */
1086 if (status != (lp->lp_ifflags & flag)) {
1087 error = (*func)(ifp, status);
1088 if (error)
1089 return (error);
1090 lp->lp_ifflags &= ~flag;
1091 lp->lp_ifflags |= status;
1092 }
1093 return (0);
1094 }
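
/*
 * Example of the bookkeeping above: setting IFF_PROMISC on the lagg
 * makes "status" IFF_PROMISC, so ifpromisc(ifp, 1) takes a reference
 * on the member and lp_ifflags records that this lagg holds it.  A
 * later lagg_setflags(lp, 0) then releases exactly one reference and
 * cannot clear promiscuity that another consumer still needs.
 */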
1095
1096 /*
1097 * Handle IFF_* flags that require certain changes on the lagg port:
1098 * if "status" is true, update the port's flags to match the lagg's;
1099 * if "status" is false, forcibly clear the flags set on the port.
1100 */
1101 static int
1102 lagg_setflags(struct lagg_port *lp, int status)
1103 {
1104 int error, i;
1105
1106 for (i = 0; lagg_pflags[i].flag; i++) {
1107 error = lagg_setflag(lp, lagg_pflags[i].flag,
1108 status, lagg_pflags[i].func);
1109 if (error)
1110 return (error);
1111 }
1112 return (0);
1113 }
1114
1115 static void
1116 lagg_start(struct ifnet *ifp)
1117 {
1118 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1119 struct mbuf *m;
1120 int error = 0;
1121
1122 LAGG_RLOCK(sc);
1123 for (;; error = 0) {
1124 IFQ_DEQUEUE(&ifp->if_snd, m);
1125 if (m == NULL)
1126 break;
1127
1128 ETHER_BPF_MTAP(ifp, m);
1129
1130 /* We need a Tx algorithm and at least one port */
1131 if (sc->sc_proto != LAGG_PROTO_NONE && sc->sc_count)
1132 error = (*sc->sc_start)(sc, m);
1133 else
1134 m_freem(m);
1135
1136 if (error == 0)
1137 ifp->if_opackets++;
1138 else
1139 ifp->if_oerrors++;
1140 }
1141 LAGG_RUNLOCK(sc);
1142
1143 return;
1144 }
1145
1146 static struct mbuf *
1147 lagg_input(struct ifnet *ifp, struct mbuf *m)
1148 {
1149 struct lagg_port *lp = ifp->if_lagg;
1150 struct lagg_softc *sc = lp->lp_softc;
1151 struct ifnet *scifp = sc->sc_ifp;
1152
1153 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1154 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1155 sc->sc_proto == LAGG_PROTO_NONE) {
1156 m_freem(m);
1157 return (NULL);
1158 }
1159
1160 LAGG_RLOCK(sc);
1161 ETHER_BPF_MTAP(scifp, m);
1162
1163 m = (*sc->sc_input)(sc, lp, m);
1164
1165 if (m != NULL) {
1166 scifp->if_ipackets++;
1167 scifp->if_ibytes += m->m_pkthdr.len;
1168
1169 if (scifp->if_flags & IFF_MONITOR) {
1170 m_freem(m);
1171 m = NULL;
1172 }
1173 }
1174
1175 LAGG_RUNLOCK(sc);
1176 return (m);
1177 }
1178
1179 static int
1180 lagg_media_change(struct ifnet *ifp)
1181 {
1182 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1183
1184 if (sc->sc_ifflags & IFF_DEBUG)
1185 printf("%s\n", __func__);
1186
1187 /* Ignore */
1188 return (0);
1189 }
1190
1191 static void
1192 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1193 {
1194 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1195 struct lagg_port *lp;
1196
1197 imr->ifm_status = IFM_AVALID;
1198 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1199
1200 LAGG_RLOCK(sc);
1201 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1202 if (LAGG_PORTACTIVE(lp))
1203 imr->ifm_status |= IFM_ACTIVE;
1204 }
1205 LAGG_RUNLOCK(sc);
1206 }
1207
1208 static void
1209 lagg_linkstate(struct lagg_softc *sc)
1210 {
1211 struct lagg_port *lp;
1212 int new_link = LINK_STATE_DOWN;
1213
1214 /* Our link is considered up if at least one of our ports is active */
1215 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1216 if (lp->lp_link_state == LINK_STATE_UP) {
1217 new_link = LINK_STATE_UP;
1218 break;
1219 }
1220 }
1221 if_link_state_change(sc->sc_ifp, new_link);
1222 }
1223
1224 static void
1225 lagg_port_state(struct ifnet *ifp, int state)
1226 {
1227 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1228 struct lagg_softc *sc = NULL;
1229
1230 if (lp != NULL)
1231 sc = lp->lp_softc;
1232 if (sc == NULL)
1233 return;
1234
1235 LAGG_WLOCK(sc);
1236 lagg_linkstate(sc);
1237 if (sc->sc_linkstate != NULL)
1238 (*sc->sc_linkstate)(lp);
1239 LAGG_WUNLOCK(sc);
1240 }
1241
1242 struct lagg_port *
1243 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1244 {
1245 struct lagg_port *lp_next, *rval = NULL;
1246 // int new_link = LINK_STATE_DOWN;
1247
1248 LAGG_RLOCK_ASSERT(sc);
1249 /*
1250 * Search for a port which reports an active link state.
1251 */
1252
1253 if (lp == NULL)
1254 goto search;
1255 if (LAGG_PORTACTIVE(lp)) {
1256 rval = lp;
1257 goto found;
1258 }
1259 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1260 LAGG_PORTACTIVE(lp_next)) {
1261 rval = lp_next;
1262 goto found;
1263 }
1264
1265 search:
1266 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1267 if (LAGG_PORTACTIVE(lp_next)) {
1268 rval = lp_next;
1269 goto found;
1270 }
1271 }
1272
1273 found:
1274 if (rval != NULL) {
1275 /*
1276 * The IEEE 802.1D standard assumes that a lagg with
1277 * multiple ports is always full duplex. This is valid
1278 * for load sharing laggs and if at least two links
1279 * are active. Unfortunately, checking the latter would
1280 * be too expensive at this point.
1281 XXX
1282 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1283 (sc->sc_count > 1))
1284 new_link = LINK_STATE_FULL_DUPLEX;
1285 else
1286 new_link = rval->lp_link_state;
1287 */
1288 }
1289
1290 return (rval);
1291 }
1292
1293 static const void *
1294 lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1295 {
1296 if (m->m_pkthdr.len < (off + len)) {
1297 return (NULL);
1298 } else if (m->m_len < (off + len)) {
1299 m_copydata(m, off, len, buf);
1300 return (buf);
1301 }
1302 return (mtod(m, char *) + off);
1303 }
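
/*
 * lagg_gethdr() lets lagg_hashmbuf() read headers that may not be
 * contiguous: if the first mbuf already holds off + len bytes it
 * returns a direct pointer, otherwise it copies the span into the
 * caller's stack buffer (e.g. ipbuf below) with m_copydata(), so the
 * hash never reads past m->m_len.
 */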
1304
1305 uint32_t
1306 lagg_hashmbuf(struct mbuf *m, uint32_t key)
1307 {
1308 uint16_t etype;
1309 uint32_t p = 0;
1310 int off;
1311 struct ether_header *eh;
1312 struct ether_vlan_header vlanbuf;
1313 const struct ether_vlan_header *vlan;
1314 #ifdef INET
1315 const struct ip *ip;
1316 struct ip ipbuf;
1317 #endif
1318 #ifdef INET6
1319 const struct ip6_hdr *ip6;
1320 struct ip6_hdr ip6buf;
1321 uint32_t flow;
1322 #endif
1323
1324 off = sizeof(*eh);
1325 if (m->m_len < off)
1326 goto out;
1327 eh = mtod(m, struct ether_header *);
1328 etype = ntohs(eh->ether_type);
1329 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
1330 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1331
1332 /* Special handling for encapsulating VLAN frames */
1333 if (m->m_flags & M_VLANTAG) {
1334 p = hash32_buf(&m->m_pkthdr.ether_vtag,
1335 sizeof(m->m_pkthdr.ether_vtag), p);
1336 } else if (etype == ETHERTYPE_VLAN) {
1337 vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
1338 if (vlan == NULL)
1339 goto out;
1340
1341 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1342 etype = ntohs(vlan->evl_proto);
1343 off += sizeof(*vlan) - sizeof(*eh);
1344 }
1345
1346 switch (etype) {
1347 #ifdef INET
1348 case ETHERTYPE_IP:
1349 ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
1350 if (ip == NULL)
1351 goto out;
1352
1353 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1354 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1355 break;
1356 #endif
1357 #ifdef INET6
1358 case ETHERTYPE_IPV6:
1359 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
1360 if (ip6 == NULL)
1361 goto out;
1362
1363 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1364 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1365 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1366 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
1367 break;
1368 #endif
1369 }
1370 out:
1371 return (p);
1372 }
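
/*
 * The hash folds in the L2 addresses, any VLAN tag and, where
 * present, the L3 addresses, so every packet of a flow yields the
 * same value.  Sketch of how lagg_lb_start() below consumes it: with
 * three ports, a flow hashing to 0x2b always selects index
 * 0x2b % 3 == 1.
 */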
1373
1374 int
1375 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1376 {
1377 int error = 0;
1378
1379 IFQ_HANDOFF(ifp, m, error);
1380 if (error)
1381 ifp->if_oerrors++;
1382 return (error);
1383 }
1384
1385 /*
1386 * Simple round robin aggregation
1387 */
1388
1389 static int
1390 lagg_rr_attach(struct lagg_softc *sc)
1391 {
1392 sc->sc_detach = lagg_rr_detach;
1393 sc->sc_start = lagg_rr_start;
1394 sc->sc_input = lagg_rr_input;
1395 sc->sc_port_create = NULL;
1396 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1397 sc->sc_seq = 0;
1398
1399 return (0);
1400 }
1401
1402 static int
1403 lagg_rr_detach(struct lagg_softc *sc)
1404 {
1405 return (0);
1406 }
1407
1408 static int
1409 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1410 {
1411 struct lagg_port *lp;
1412 uint32_t p;
1413
1414 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1415 p %= sc->sc_count;
1416 lp = SLIST_FIRST(&sc->sc_ports);
1417 while (p--)
1418 lp = SLIST_NEXT(lp, lp_entries);
1419
1420 /*
1421 * Check the port's link state. This will return the next active
1422 * port if the link is down or the port is NULL.
1423 */
1424 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1425 m_freem(m);
1426 return (ENOENT);
1427 }
1428
1429 /* Send mbuf */
1430 return (lagg_enqueue(lp->lp_ifp, m));
1431 }
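
/*
 * Round-robin sketch: atomic_fetchadd_32() returns sc_seq before the
 * increment, so concurrent senders each draw a unique number.  With
 * sc_count == 3, successive packets walk to ports 0, 1, 2, 0, ...,
 * and lagg_link_active() skips ahead if the chosen link is down.
 */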
1432
1433 static struct mbuf *
1434 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1435 {
1436 struct ifnet *ifp = sc->sc_ifp;
1437
1438 /* Just pass in the packet to our lagg device */
1439 m->m_pkthdr.rcvif = ifp;
1440
1441 return (m);
1442 }
1443
1444 /*
1445 * Active failover
1446 */
1447
1448 static int
1449 lagg_fail_attach(struct lagg_softc *sc)
1450 {
1451 sc->sc_detach = lagg_fail_detach;
1452 sc->sc_start = lagg_fail_start;
1453 sc->sc_input = lagg_fail_input;
1454 sc->sc_port_create = NULL;
1455 sc->sc_port_destroy = NULL;
1456
1457 return (0);
1458 }
1459
1460 static int
1461 lagg_fail_detach(struct lagg_softc *sc)
1462 {
1463 return (0);
1464 }
1465
1466 static int
1467 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1468 {
1469 struct lagg_port *lp;
1470
1471 /* Use the master port if active or the next available port */
1472 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1473 m_freem(m);
1474 return (ENOENT);
1475 }
1476
1477 /* Send mbuf */
1478 return (lagg_enqueue(lp->lp_ifp, m));
1479 }
1480
1481 static struct mbuf *
1482 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1483 {
1484 struct ifnet *ifp = sc->sc_ifp;
1485 struct lagg_port *tmp_tp;
1486
1487 if (lp == sc->sc_primary || lagg_failover_rx_all) {
1488 m->m_pkthdr.rcvif = ifp;
1489 return (m);
1490 }
1491
1492 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1493 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1494 /*
1495 * If tmp_tp is null, we've received a packet when all
1496 * our links are down. Weird, but process it anyway.
1497 */
1498 if ((tmp_tp == NULL || tmp_tp == lp)) {
1499 m->m_pkthdr.rcvif = ifp;
1500 return (m);
1501 }
1502 }
1503
1504 m_freem(m);
1505 return (NULL);
1506 }
1507
1508 /*
1509 * Loadbalancing
1510 */
1511
1512 static int
1513 lagg_lb_attach(struct lagg_softc *sc)
1514 {
1515 struct lagg_port *lp;
1516 struct lagg_lb *lb;
1517
1518 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1519 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1520 return (ENOMEM);
1521
1522 sc->sc_detach = lagg_lb_detach;
1523 sc->sc_start = lagg_lb_start;
1524 sc->sc_input = lagg_lb_input;
1525 sc->sc_port_create = lagg_lb_port_create;
1526 sc->sc_port_destroy = lagg_lb_port_destroy;
1527 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1528
1529 lb->lb_key = arc4random();
1530 sc->sc_psc = (caddr_t)lb;
1531
1532 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1533 lagg_lb_port_create(lp);
1534
1535 return (0);
1536 }
1537
1538 static int
1539 lagg_lb_detach(struct lagg_softc *sc)
1540 {
1541 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1542 if (lb != NULL)
1543 free(lb, M_DEVBUF);
1544 return (0);
1545 }
1546
1547 static int
1548 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1549 {
1550 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1551 struct lagg_port *lp_next;
1552 int i = 0;
1553
1554 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1555 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1556 if (lp_next == lp)
1557 continue;
1558 if (i >= LAGG_MAX_PORTS)
1559 return (EINVAL);
1560 if (sc->sc_ifflags & IFF_DEBUG)
1561 printf("%s: port %s at index %d\n",
1562 sc->sc_ifname, lp_next->lp_ifname, i);
1563 lb->lb_ports[i++] = lp_next;
1564 }
1565
1566 return (0);
1567 }
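
/*
 * Rebuild example: passing the departing port as "lp" (as
 * lagg_lb_port_destroy() does) skips it, so with members em0, em1,
 * em2, destroying em1 leaves lb_ports[] = { em0, em2 }, and
 * lagg_lb_start() then hashes modulo the reduced sc_count.
 */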
1568
1569 static int
1570 lagg_lb_port_create(struct lagg_port *lp)
1571 {
1572 struct lagg_softc *sc = lp->lp_softc;
1573 return (lagg_lb_porttable(sc, NULL));
1574 }
1575
1576 static void
1577 lagg_lb_port_destroy(struct lagg_port *lp)
1578 {
1579 struct lagg_softc *sc = lp->lp_softc;
1580 lagg_lb_porttable(sc, lp);
1581 }
1582
1583 static int
1584 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1585 {
1586 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1587 struct lagg_port *lp = NULL;
1588 uint32_t p = 0;
1589
1590 p = lagg_hashmbuf(m, lb->lb_key);
1591 p %= sc->sc_count;
1592 lp = lb->lb_ports[p];
1593
1594 /*
1595 * Check the port's link state. This will return the next active
1596 * port if the link is down or the port is NULL.
1597 */
1598 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1599 m_freem(m);
1600 return (ENOENT);
1601 }
1602
1603 /* Send mbuf */
1604 return (lagg_enqueue(lp->lp_ifp, m));
1605 }
1606
1607 static struct mbuf *
1608 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1609 {
1610 struct ifnet *ifp = sc->sc_ifp;
1611
1612 /* Just pass in the packet to our lagg device */
1613 m->m_pkthdr.rcvif = ifp;
1614
1615 return (m);
1616 }
1617
1618 /*
1619 * 802.3ad LACP
1620 */
1621
1622 static int
1623 lagg_lacp_attach(struct lagg_softc *sc)
1624 {
1625 struct lagg_port *lp;
1626 int error;
1627
1628 sc->sc_detach = lagg_lacp_detach;
1629 sc->sc_port_create = lacp_port_create;
1630 sc->sc_port_destroy = lacp_port_destroy;
1631 sc->sc_linkstate = lacp_linkstate;
1632 sc->sc_start = lagg_lacp_start;
1633 sc->sc_input = lagg_lacp_input;
1634 sc->sc_init = lacp_init;
1635 sc->sc_stop = lacp_stop;
1636 sc->sc_lladdr = lagg_lacp_lladdr;
1637 sc->sc_req = lacp_req;
1638 sc->sc_portreq = lacp_portreq;
1639
1640 error = lacp_attach(sc);
1641 if (error)
1642 return (error);
1643
1644 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1645 lacp_port_create(lp);
1646
1647 return (error);
1648 }
1649
1650 static int
1651 lagg_lacp_detach(struct lagg_softc *sc)
1652 {
1653 struct lagg_port *lp;
1654 int error;
1655
1656 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1657 lacp_port_destroy(lp);
1658
1659 /* unlocking is safe here */
1660 LAGG_WUNLOCK(sc);
1661 error = lacp_detach(sc);
1662 LAGG_WLOCK(sc);
1663
1664 return (error);
1665 }
1666
1667 static void
1668 lagg_lacp_lladdr(struct lagg_softc *sc)
1669 {
1670 struct lagg_port *lp;
1671
1672 /* purge all the lacp ports */
1673 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1674 lacp_port_destroy(lp);
1675
1676 /* add them back in */
1677 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1678 lacp_port_create(lp);
1679 }
1680
1681 static int
1682 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1683 {
1684 struct lagg_port *lp;
1685
1686 lp = lacp_select_tx_port(sc, m);
1687 if (lp == NULL) {
1688 m_freem(m);
1689 return (EBUSY);
1690 }
1691
1692 /* Send mbuf */
1693 return (lagg_enqueue(lp->lp_ifp, m));
1694 }
1695
1696 static struct mbuf *
1697 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1698 {
1699 struct ifnet *ifp = sc->sc_ifp;
1700 struct ether_header *eh;
1701 u_short etype;
1702
1703 eh = mtod(m, struct ether_header *);
1704 etype = ntohs(eh->ether_type);
1705
1706 /* Tap off LACP control messages */
1707 if (etype == ETHERTYPE_SLOW) {
1708 m = lacp_input(lp, m);
1709 if (m == NULL)
1710 return (NULL);
1711 }
1712
1713 /*
1714 * If the port is not collecting or not in the active aggregator then
1715 * free and return.
1716 */
1717 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1718 m_freem(m);
1719 return (NULL);
1720 }
1721
1722 m->m_pkthdr.rcvif = ifp;
1723 return (m);
1724 }