/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/net/if_lagg.c 339251 2018-10-09 13:26:06Z jtl $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/vnet.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif
#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
#define	LAGG_RLOCK()	struct epoch_tracker lagg_et; epoch_enter_preempt(net_epoch_preempt, &lagg_et)
#define	LAGG_RUNLOCK()	epoch_exit_preempt(net_epoch_preempt, &lagg_et)
#define	LAGG_RLOCK_ASSERT()	MPASS(in_epoch(net_epoch_preempt))
#define	LAGG_UNLOCK_ASSERT()	MPASS(!in_epoch(net_epoch_preempt))
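
/*
 * Locking model: readers that walk the port list run inside the
 * preemptible network epoch (LAGG_RLOCK/LAGG_RUNLOCK above), while all
 * configuration changes are serialized by the per-softc sx lock below
 * (LAGG_XLOCK/LAGG_XUNLOCK).  Note that LAGG_RLOCK() declares the epoch
 * tracker variable in the enclosing scope, so a function can open at
 * most one such read section per scope and must pair it with
 * LAGG_RUNLOCK() in the same block.
 */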

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					"if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";

static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#ifdef RATELIMIT
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static int	lagg_setcaps(struct lagg_port *, int cap);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t	lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
    {
	.pr_num = LAGG_PROTO_NONE
    },
    {
	.pr_num = LAGG_PROTO_ROUNDROBIN,
	.pr_attach = lagg_rr_attach,
	.pr_start = lagg_rr_start,
	.pr_input = lagg_rr_input,
    },
    {
	.pr_num = LAGG_PROTO_FAILOVER,
	.pr_start = lagg_fail_start,
	.pr_input = lagg_fail_input,
    },
    {
	.pr_num = LAGG_PROTO_LOADBALANCE,
	.pr_attach = lagg_lb_attach,
	.pr_detach = lagg_lb_detach,
	.pr_start = lagg_lb_start,
	.pr_input = lagg_lb_input,
	.pr_addport = lagg_lb_port_create,
	.pr_delport = lagg_lb_port_destroy,
    },
    {
	.pr_num = LAGG_PROTO_LACP,
	.pr_attach = lagg_lacp_attach,
	.pr_detach = lagg_lacp_detach,
	.pr_start = lagg_lacp_start,
	.pr_input = lagg_lacp_input,
	.pr_addport = lacp_port_create,
	.pr_delport = lacp_port_destroy,
	.pr_linkstate = lacp_linkstate,
	.pr_init = lacp_init,
	.pr_stop = lacp_stop,
	.pr_lladdr = lagg_lacp_lladdr,
	.pr_request = lacp_req,
	.pr_portreq = lacp_portreq,
    },
    {
	.pr_num = LAGG_PROTO_BROADCAST,
	.pr_start = lagg_bcast_start,
	.pr_input = lagg_bcast_input,
    },
};
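
/*
 * Handlers are dispatched by indexing this table with the softc's
 * current sc_proto (see lagg_proto_start() and friends below); e.g.
 * with LAGG_PROTO_LACP attached, lagg_proto_start() resolves to
 * lagg_lacp_start().  The entries must therefore stay in LAGG_PROTO_*
 * order, so that lagg_protos[pr].pr_num == pr.
 */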

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
    &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");

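/*
 * A sketch of how these knobs surface to an administrator (assumed
 * typical usage, not taken from this file):
 *
 *	# sysctl net.link.lagg.failover_rx_all=1
 *	# sysctl net.link.lagg.default_use_flowid=1
 *	# sysctl net.link.lagg.default_flowid_shift=16
 *
 * The default_* values are only consulted when a new lagg interface is
 * cloned; existing interfaces keep the options they were created with.
 */
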
static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create,
	    lagg_clone_destroy, 0);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	if_clone_detach(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);

static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}

static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config EVENT.
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_RUNLOCK();
}

/*
 * This routine is run via a vlan
 * unconfig EVENT.
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_RUNLOCK();
}

static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
	LAGG_SX_INIT(sc);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	CK_SLIST_INIT(&sc->sc_ports);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, laggname, unit);
	ifp->if_softc = sc;
	ifp->if_transmit = lagg_transmit;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS | IFCAP_TXRTLMT;
#else
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
#endif

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();
	LAGG_XUNLOCK(sc);

	return (0);
}

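/*
 * For reference, a minimal administrative sketch of cloning and
 * configuring a lagg (assumed typical ifconfig(8) usage, not part of
 * this file; the member interface names are hypothetical):
 *
 *	# ifconfig lagg0 create
 *	# ifconfig lagg0 laggproto lacp laggport ix0 laggport ix1
 *	# ifconfig lagg0 inet 192.0.2.1/24 up
 *
 * Destroying the interface ("ifconfig lagg0 destroy") enters
 * lagg_clone_destroy() below.
 */
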
static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	sc->sc_destroying = 1;
	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);

	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_XUNLOCK(sc);

	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	LAGG_SX_DESTROY(sc);
	free(sc, M_DEVBUF);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap, ena, pena;
	uint64_t hwa;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_XLOCK_ASSERT(sc);

	/* Get common enabled capabilities for the lagg ports */
	ena = ~0;
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		ena &= lp->lp_ifp->if_capenable;
	ena = (ena == ~0 ? 0 : ena);

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	do {
		pena = ena;
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setcaps(lp, ena);
			ena &= lp->lp_ifp->if_capenable;
		}
	} while (pena != ena);

	/* Get other capabilities from the lagg ports */
	cap = ~0;
	hwa = ~(uint64_t)0;
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap = (cap == ~0 ? 0 : cap);
	hwa = (hwa == ~(uint64_t)0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	int error, i;
	uint64_t *pval;

	LAGG_XLOCK_ASSERT(sc);

	/* Limit the maximum number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if the port has already been associated with a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",
		    ifp->if_xname);
		return (EINVAL);
	}

	lp = malloc(sizeof(struct lagg_port), M_DEVBUF, M_WAITOK|M_ZERO);
	lp->lp_softc = sc;

	/* Check if the port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_DEVBUF);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_DEVBUF);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	if_ref(ifp);
	lp->lp_ifp = ifp;

	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
	lp->lp_ifcapenable = ifp->if_capenable;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
		lagg_proto_lladdr(sc);
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	} else {
		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	}
	lagg_setflags(lp, 1);

	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_primary = lp;

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);

	/*
	 * Insert into the list of ports.
	 * Keep ports sorted by if_index: it keeps configuration
	 * predictable, so the same `ifconfig laggN create ...` command
	 * leads to the same result each time.
	 */
	LAGG_RLOCK();
	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
		    ((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	LAGG_RUNLOCK();
	if (tlp != NULL)
		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	lagg_setmulti(lp);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		return (error);
	}

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_SXLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif

static void
lagg_port_destroy_cb(epoch_context_t ec)
{
	struct lagg_port *lp;
	struct ifnet *ifp;

	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
	ifp = lp->lp_ifp;

	if_rele(ifp);
	free(lp, M_DEVBUF);
}

static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_XLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	if (lp->lp_detaching == 0)
		lagg_clrmulti(lp);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
			bzero(&lladdr, ETHER_ADDR_LEN);
		else
			bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN);
		sc->sc_primary = lp0;
		if (sc->sc_destroying == 0) {
			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			lagg_proto_lladdr(sc);
			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
		}

		/*
		 * Update the lladdr for each port (the new primary needs
		 * an update as well, to switch from the old lladdr to its
		 * 'real' one).
		 */
		CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN);
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if (lp->lp_detaching == 0) {
		lagg_setflags(lp, 0);
		lagg_setcaps(lp, lp->lp_ifcapenable);
		if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN);
	}

	/*
	 * Free the port and release its ifnet reference after a grace
	 * period has elapsed.
	 */
	epoch_call(net_epoch_preempt, &lp->lp_epoch_ctx, lagg_port_destroy_cb);
	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK();
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK();
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK();
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_XLOCK(sc);
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(sc->sc_ifp);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}

/*
 * Requests counter @cnt data.
 *
 * Counter value is calculated the following way:
 * 1) for each port, sum the difference between the current and "initial"
 *    measurements.
 * 2) add the lagg logical interface counters.
 * 3) add the data from the detached_counters array.
 *
 * We also do the following things on port attach/detach:
 * 1) On port attach we store all the counters it has into the
 *    port_counters array.
 * 2) On port detach we add the difference between the "initial" and
 *    current counters data to the detached_counters array.
 */
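/*
 * A small worked example (hypothetical numbers): a port joins while
 * reporting if_ipackets == 1000, so port_counters.val[IFCOUNTER_IPACKETS]
 * is saved as 1000.  After the port receives 500 more packets it reports
 * 1500, and this lagg accounts 1500 - 1000 = 500 of them.  If the port
 * then detaches, that 500 is folded into detached_counters, so the
 * lagg's totals never go backwards.
 */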
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lpifp;
	uint64_t newval, oldval, vsum;

	/* Revise this when we've got non-generic counters. */
	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	sc = (struct lagg_softc *)ifp->if_softc;

	vsum = 0;
	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* Saved attached value */
		oldval = lp->port_counters.val[cnt];
		/* Current value */
		lpifp = lp->lp_ifp;
		newval = lpifp->if_get_counter(lpifp, cnt);
		/* Calculate the difference and accumulate it */
		vsum += newval - oldval;
	}
	LAGG_RUNLOCK();

	/*
	 * Add counter data which might be added by upper
	 * layer protocols operating on the logical interface.
	 */
	vsum += if_get_counter_default(ifp, cnt);

	/*
	 * Add counter data from detached ports.
	 */
	vsum += sc->detached_counters.val[cnt];

	return (vsum);
}

/*
 * For direct output to child ports.
 */
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		return ((*lp->lp_output)(ifp, m, dst, ro));
	}

	/* drop any other frames */
	m_freem(m);
	return (ENETDOWN);
}

static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;
	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;

	sc = lp->lp_softc;

	LAGG_XLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_XUNLOCK(sc);
	VLAN_CAPABILITIES(sc->sc_ifp);
}

static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	lagg_proto_portreq(sc, lp, &rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}

static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		LAGG_XUNLOCK(sc);
		return;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Update the port lladdrs if needed.
	 * This call might be a notification via if_setlladdr() that the
	 * lladdr has been changed.
	 */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
		    ETHER_ADDR_LEN) != 0)
			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	}

	lagg_proto_init(sc);

	LAGG_XUNLOCK(sc);
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_XLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	lagg_proto_stop(sc);
}

static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_XLOCK(sc);
		buflen = sc->sc_count * sizeof(struct lagg_reqport);
		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
		ra->ra_proto = sc->sc_proto;
		lagg_proto_request(sc, &ra->ra_psc);
		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_XUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}

		LAGG_XLOCK(sc);
		lagg_proto_detach(sc);
		LAGG_UNLOCK_ASSERT();
		lagg_proto_attach(sc, ra->ra_proto);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGOPTS:
		LAGG_XLOCK(sc);
		ro->ro_opts = sc->sc_opts;
		if (sc->sc_proto == LAGG_PROTO_LACP) {
			struct lacp_softc *lsc;

			lsc = (struct lacp_softc *)sc->sc_psc;
			if (lsc->lsc_debug.lsc_tx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
			if (lsc->lsc_debug.lsc_rx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
			if (lsc->lsc_strict_mode != 0)
				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
			if (lsc->lsc_fast_timeout != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TIMEOUT;

			ro->ro_active = sc->sc_active;
		} else {
			ro->ro_active = 0;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
				ro->ro_active += LAGG_PORTACTIVE(lp);
		}
		ro->ro_bkt = sc->sc_bkt;
		ro->ro_flapping = sc->sc_flapping;
		ro->ro_flowid_shift = sc->flowid_shift;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGOPTS:
		if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) {
			if (ro->ro_bkt == 0)
				sc->sc_bkt = 1; /* Minimum 1 packet per iface. */
			else
				sc->sc_bkt = ro->ro_bkt;
		}
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ro->ro_opts == 0)
			break;
		/*
		 * Set options.  LACP options are stored in sc->sc_psc,
		 * not in sc_opts.
		 */
		int valid, lacp;

		switch (ro->ro_opts) {
		case LAGG_OPT_USE_FLOWID:
		case -LAGG_OPT_USE_FLOWID:
		case LAGG_OPT_FLOWIDSHIFT:
			valid = 1;
			lacp = 0;
			break;
		case LAGG_OPT_LACP_TXTEST:
		case -LAGG_OPT_LACP_TXTEST:
		case LAGG_OPT_LACP_RXTEST:
		case -LAGG_OPT_LACP_RXTEST:
		case LAGG_OPT_LACP_STRICT:
		case -LAGG_OPT_LACP_STRICT:
		case LAGG_OPT_LACP_TIMEOUT:
		case -LAGG_OPT_LACP_TIMEOUT:
			valid = lacp = 1;
			break;
		default:
			valid = lacp = 0;
			break;
		}

		LAGG_XLOCK(sc);

		if (valid == 0 ||
		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
			/* Invalid combination of options specified. */
			error = EINVAL;
			LAGG_XUNLOCK(sc);
			break;	/* Return from SIOCSLAGGOPTS. */
		}
		/*
		 * Store new options into sc->sc_opts except for
		 * FLOWIDSHIFT and LACP options.
		 */
		if (lacp == 0) {
			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
				sc->flowid_shift = ro->ro_flowid_shift;
			else if (ro->ro_opts > 0)
				sc->sc_opts |= ro->ro_opts;
			else
				sc->sc_opts &= ~ro->ro_opts;
		} else {
			struct lacp_softc *lsc;
			struct lacp_port *lp;

			lsc = (struct lacp_softc *)sc->sc_psc;

			switch (ro->ro_opts) {
			case LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 1;
				break;
			case -LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 0;
				break;
			case LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 1;
				break;
			case -LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 0;
				break;
			case LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 1;
				break;
			case -LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 0;
				break;
			case LAGG_OPT_LACP_TIMEOUT:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state |= LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 1;
				break;
			case -LAGG_OPT_LACP_TIMEOUT:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state &= ~LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 0;
				break;
			}
		}
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGFLAGS:
		rf->rf_flags = 0;
		LAGG_XLOCK(sc);
		if (sc->sc_flags & MBUF_HASHFLAG_L2)
			rf->rf_flags |= LAGG_F_HASHL2;
		if (sc->sc_flags & MBUF_HASHFLAG_L3)
			rf->rf_flags |= LAGG_F_HASHL3;
		if (sc->sc_flags & MBUF_HASHFLAG_L4)
			rf->rf_flags |= LAGG_F_HASHL4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGHASH:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
			error = EINVAL;
			break;
		}
		LAGG_XLOCK(sc);
		sc->sc_flags = 0;
		if (rf->rf_flags & LAGG_F_HASHL2)
			sc->sc_flags |= MBUF_HASHFLAG_L2;
		if (rf->rf_flags & LAGG_F_HASHL3)
			sc->sc_flags |= MBUF_HASHFLAG_L3;
		if (rf->rf_flags & LAGG_F_HASHL4)
			sc->sc_flags |= MBUF_HASHFLAG_L4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK();
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK();
			if_rele(tpif);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK();
		if_rele(tpif);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
#ifdef INET6
		/*
		 * A laggport interface should not have inet6 addresses
		 * because two interfaces with a valid link-local scope zone
		 * must not be merged in any form.  This restriction is
		 * needed to prevent violation of the link-local scope zone.
		 * Attempting to add a laggport interface that has inet6
		 * addresses triggers removal of all inet6 addresses on the
		 * member interface.
		 */
		if (in6ifa_llaonifp(tpif)) {
			in6_ifdetach(tpif);
			if_printf(sc->sc_ifp,
			    "IPv6 addresses on %s have been removed "
			    "before adding it as a member to prevent "
			    "IPv6 address scope violation.\n",
			    tpif->if_xname);
		}
#endif
		LAGG_XLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_XLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_XUNLOCK(sc);
			if_rele(tpif);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			lagg_stop(sc);
			LAGG_XUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			LAGG_XUNLOCK(sc);
			(*ifp->if_init)(sc);
		} else
			LAGG_XUNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_clrmulti(lp);
			lagg_setmulti(lp);
		}
		LAGG_XUNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		error = 0;
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be directly changed */
		error = EINVAL;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

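/*
 * When RATELIMIT is compiled in, send-tag allocation is not performed by
 * the lagg itself: the handler below picks the member port the flow
 * would likely be transmitted on (broadly the same port-selection rules
 * as the corresponding transmit paths; the hashed protocols require a
 * valid flowid) and forwards the allocation request to that port's
 * driver.
 */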
#ifdef RATELIMIT
static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;
	struct lagg_lb *lb;
	uint32_t p;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		lp = lagg_link_active(sc, sc->sc_primary);
		break;
	case LAGG_PROTO_LOADBALANCE:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    params->hdr.flowtype == M_HASHTYPE_NONE)
			return (EOPNOTSUPP);
		p = params->hdr.flowid >> sc->flowid_shift;
		p %= sc->sc_count;
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[p];
		lp = lagg_link_active(sc, lp);
		break;
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    params->hdr.flowtype == M_HASHTYPE_NONE)
			return (EOPNOTSUPP);
		lp = lacp_select_tx_port_by_hash(sc, params->hdr.flowid);
		break;
	default:
		return (EOPNOTSUPP);
	}
	if (lp == NULL)
		return (EOPNOTSUPP);
	ifp = lp->lp_ifp;
	if (ifp == NULL || ifp->if_snd_tag_alloc == NULL ||
	    (ifp->if_capenable & IFCAP_TXRTLMT) == 0)
		return (EOPNOTSUPP);

	/* forward allocation request */
	return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
#endif

static int
lagg_setmulti(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma;
	int error;

	IF_ADDR_WLOCK(scifp);
	CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(scifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr,
		    ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp->if_index;
		mc->mc_ifma = NULL;
		SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(scifp);
	SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
		error = if_addmulti(ifp,
		    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
		if (error)
			return (error);
	}
	return (0);
}

static int
lagg_clrmulti(struct lagg_port *lp)
{
	struct lagg_mc *mc;

	LAGG_XLOCK_ASSERT(lp->lp_softc);
	while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
		SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
		if (mc->mc_ifma && lp->lp_detaching == 0)
			if_delmulti_ifma(mc->mc_ifma);
		free(mc, M_DEVBUF);
	}
	return (0);
}

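/*
 * Apply capability set "cap" to a member port by replaying SIOCSIFCAP
 * through the port's original ioctl handler.  This is used both to
 * force all members to a common capability set (lagg_capabilities())
 * and to restore a member's own settings when it leaves the lagg
 * (lagg_port_destroy()).
 */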
static int
lagg_setcaps(struct lagg_port *lp, int cap)
{
	struct ifreq ifr;

	if (lp->lp_ifp->if_capenable == cap)
		return (0);
	if (lp->lp_ioctl == NULL)
		return (ENXIO);
	ifr.ifr_reqcap = cap;
	return ((*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr));
}

/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_XLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded port status is different from what
	 * we want it to be.  If it is, flip it.  We record port
	 * status in lp_ifflags so that we won't clear a port flag
	 * we haven't set.  In fact, we don't clear or set port
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that the recorded flags still
	 * are in accord with the actual port flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the lagg port:
 * if "status" is true, update the port's flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

static int
lagg_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	int error;

	LAGG_RLOCK();
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		LAGG_RUNLOCK();
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}

	ETHER_BPF_MTAP(ifp, m);

	error = lagg_proto_start(sc, m);
	LAGG_RUNLOCK();

	if (error != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	return (error);
}

/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}

static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	LAGG_RLOCK();
	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    lp->lp_detaching != 0 ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		LAGG_RUNLOCK();
		m_freem(m);
		return (NULL);
	}

	ETHER_BPF_MTAP(scifp, m);

	m = lagg_proto_input(sc, lp, m);
	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
		m_freem(m);
		m = NULL;
	}

	LAGG_RUNLOCK();
	return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	LAGG_RUNLOCK();
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	LAGG_XLOCK_ASSERT(sc);

	/* LACP handles link state itself */
	if (sc->sc_proto == LAGG_PROTO_LACP)
		return;

	/* Our link is considered up if at least one of our ports is active */
	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	LAGG_RUNLOCK();
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		speed = 0;
		LAGG_RLOCK();
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		LAGG_RUNLOCK();
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}

static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_XLOCK(sc);
	lagg_linkstate(sc);
	lagg_proto_linkstate(sc, lp);
	LAGG_XUNLOCK(sc);
}

struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;
	struct epoch_tracker net_et;

	/*
	 * Search for a port which reports an active link state.
	 */

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	epoch_enter_preempt(net_epoch_preempt, &net_et);
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			epoch_exit_preempt(net_epoch_preempt, &net_et);
			return (lp_next);
		}
	}
	epoch_exit_preempt(net_epoch_preempt, &net_et);
found:
	return (rval);
}

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

	return (ifp->if_transmit)(ifp, m);
}

/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_seq = 0;
	sc->sc_bkt_count = sc->sc_bkt;
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

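	/*
	 * sc_bkt > 0 enables "bucket" bursting: sc_bkt packets are sent
	 * on the current port before sc_seq advances to the next one.
	 * With sc_bkt == 0 every packet bumps sc_seq, giving classic
	 * per-packet round robin.
	 */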
	if (sc->sc_bkt_count == 0 && sc->sc_bkt > 0)
		sc->sc_bkt_count = sc->sc_bkt;

	if (sc->sc_bkt > 0) {
		atomic_subtract_int(&sc->sc_bkt_count, 1);
		if (atomic_cmpset_int(&sc->sc_bkt_count, 0, sc->sc_bkt))
			p = atomic_fetchadd_32(&sc->sc_seq, 1);
		else
			p = sc->sc_seq;
	} else
		p = atomic_fetchadd_32(&sc->sc_seq, 1);

	p %= sc->sc_count;
	lp = CK_SLIST_FIRST(&sc->sc_ports);

	while (p--)
		lp = CK_SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * Broadcast mode
 */
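/*
 * Each additional active port gets its own copy of the mbuf via
 * m_copym(); the original mbuf is sent untouched on the last active
 * port, which saves one copy per transmission.
 */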
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
	int active_ports = 0;
	int errors = 0;
	int ret;
	struct lagg_port *lp, *last = NULL;
	struct mbuf *m0;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (!LAGG_PORTACTIVE(lp))
			continue;

		active_ports++;

		if (last != NULL) {
			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m0 == NULL) {
				ret = ENOBUFS;
				errors++;
				break;
			}

			ret = lagg_enqueue(last->lp_ifp, m0);
			if (ret != 0)
				errors++;
		}
		last = lp;
	}
	LAGG_RUNLOCK();

	if (last == NULL) {
		m_freem(m);
		return (ENOENT);
	}
	if ((last = lagg_link_active(sc, last)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	ret = lagg_enqueue(last->lp_ifp, m);
	if (ret != 0)
		errors++;

	if (errors == 0)
		return (ret);

	return (0);
}

static struct mbuf*
lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;
	return (m);
}

/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyways.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}

/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	LAGG_XLOCK_ASSERT(sc);
	lb = malloc(sizeof(struct lagg_lb), M_DEVBUF, M_WAITOK | M_ZERO);
	lb->lb_key = m_ether_tcpip_hash_init();
	sc->sc_psc = lb;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;

	lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_DEVBUF);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0, rv;

	rv = 0;
	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS) {
			rv = EINVAL;
			break;
		}
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
		lb->lb_ports[i++] = lp_next;
	}
	LAGG_RUNLOCK();

	return (rv);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
}

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

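	/*
	 * Prefer the flowid computed by the receiving NIC when the
	 * USE_FLOWID option is set; otherwise fall back to a software
	 * hash over the configured L2/L3/L4 header fields.
	 */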
	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * 802.3ad LACP
 */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);
	if (lp == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}