FreeBSD/Linux Kernel Cross Reference
sys/net/if_bridge.c
1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
61 * POSSIBILITY OF SUCH DAMAGE.
62 *
63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
64 */
65
66 /*
67 * Network interface bridge support.
68 *
69 * TODO:
70 *
71 * - Currently only supports Ethernet-like interfaces (Ethernet,
72 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
73 * to bridge other types of interfaces (FDDI-FDDI, and maybe
74 * consider heterogeneous bridges).
75 */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD: releng/11.2/sys/net/if_bridge.c 332513 2018-04-15 15:22:28Z kp $");
79
80 #include "opt_inet.h"
81 #include "opt_inet6.h"
82
83 #include <sys/param.h>
84 #include <sys/eventhandler.h>
85 #include <sys/mbuf.h>
86 #include <sys/malloc.h>
87 #include <sys/protosw.h>
88 #include <sys/systm.h>
89 #include <sys/jail.h>
90 #include <sys/time.h>
91 #include <sys/socket.h> /* for net/if.h */
92 #include <sys/sockio.h>
93 #include <sys/ctype.h> /* string functions */
94 #include <sys/kernel.h>
95 #include <sys/random.h>
96 #include <sys/syslog.h>
97 #include <sys/sysctl.h>
98 #include <vm/uma.h>
99 #include <sys/module.h>
100 #include <sys/priv.h>
101 #include <sys/proc.h>
102 #include <sys/lock.h>
103 #include <sys/mutex.h>
104
105 #include <net/bpf.h>
106 #include <net/if.h>
107 #include <net/if_clone.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_var.h>
111 #include <net/pfil.h>
112 #include <net/vnet.h>
113
114 #include <netinet/in.h>
115 #include <netinet/in_systm.h>
116 #include <netinet/in_var.h>
117 #include <netinet/ip.h>
118 #include <netinet/ip_var.h>
119 #ifdef INET6
120 #include <netinet/ip6.h>
121 #include <netinet6/ip6_var.h>
122 #include <netinet6/in6_ifattach.h>
123 #endif
124 #if defined(INET) || defined(INET6)
125 #include <netinet/ip_carp.h>
126 #endif
127 #include <machine/in_cksum.h>
128 #include <netinet/if_ether.h>
129 #include <net/bridgestp.h>
130 #include <net/if_bridgevar.h>
131 #include <net/if_llc.h>
132 #include <net/if_vlan_var.h>
133
134 #include <net/route.h>
135
136 /*
137 * Size of the route hash table. Must be a power of two.
138 */
139 #ifndef BRIDGE_RTHASH_SIZE
140 #define BRIDGE_RTHASH_SIZE 1024
141 #endif
142
143 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
144
145 /*
146 * Default maximum number of addresses to cache.
147 */
148 #ifndef BRIDGE_RTABLE_MAX
149 #define BRIDGE_RTABLE_MAX 2000
150 #endif
151
152 /*
153 * Timeout (in seconds) for entries learned dynamically.
154 */
155 #ifndef BRIDGE_RTABLE_TIMEOUT
156 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
157 #endif
158
159 /*
160 * Number of seconds between walks of the route list.
161 */
162 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
163 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
164 #endif
165
166 /*
167 * List of capabilities to possibly mask on the member interface.
168 */
169 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
170 IFCAP_TXCSUM_IPV6)
171
172 /*
173 * List of capabilities to strip
174 */
175 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
176
177 /*
178 * Bridge interface list entry.
179 */
180 struct bridge_iflist {
181 LIST_ENTRY(bridge_iflist) bif_next;
182 struct ifnet *bif_ifp; /* member if */
183 struct bstp_port bif_stp; /* STP state */
184 uint32_t bif_flags; /* member if flags */
185 int bif_savedcaps; /* saved capabilities */
186 uint32_t bif_addrmax; /* max # of addresses */
187 uint32_t bif_addrcnt; /* cur. # of addresses */
188 uint32_t bif_addrexceeded;/* # of address violations */
189 };
190
191 /*
192 * Bridge route node.
193 */
194 struct bridge_rtnode {
195 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
196 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
197 struct bridge_iflist *brt_dst; /* destination if */
198 unsigned long brt_expire; /* expiration time */
199 uint8_t brt_flags; /* address flags */
200 uint8_t brt_addr[ETHER_ADDR_LEN];
201 uint16_t brt_vlan; /* vlan id */
202 };
203 #define brt_ifp brt_dst->bif_ifp
204
205 /*
206 * Software state for each bridge.
207 */
208 struct bridge_softc {
209 struct ifnet *sc_ifp; /* make this an interface */
210 LIST_ENTRY(bridge_softc) sc_list;
211 struct mtx sc_mtx;
212 struct cv sc_cv;
213 uint32_t sc_brtmax; /* max # of addresses */
214 uint32_t sc_brtcnt; /* cur. # of addresses */
215 uint32_t sc_brttimeout; /* rt timeout in seconds */
216 struct callout sc_brcallout; /* bridge callout */
217 uint32_t sc_iflist_ref; /* reader refcount for sc_iflist */
218 uint32_t sc_iflist_xcnt; /* exclusive-access waiters on sc_iflist */
219 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
220 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
221 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
222 uint32_t sc_rthash_key; /* key for hash */
223 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
224 struct bstp_state sc_stp; /* STP state */
225 uint32_t sc_brtexceeded; /* # of cache drops */
226 struct ifnet *sc_ifaddr; /* member mac copied from */
227 u_char sc_defaddr[6]; /* Default MAC address */
228 };
229
230 static VNET_DEFINE(struct mtx, bridge_list_mtx);
231 #define V_bridge_list_mtx VNET(bridge_list_mtx)
232 static eventhandler_tag bridge_detach_cookie;
233
234 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
235
236 uma_zone_t bridge_rtnode_zone;
237
238 static int bridge_clone_create(struct if_clone *, int, caddr_t);
239 static void bridge_clone_destroy(struct ifnet *);
240
241 static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
242 static void bridge_mutecaps(struct bridge_softc *);
243 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
244 int);
245 static void bridge_ifdetach(void *arg __unused, struct ifnet *);
246 static void bridge_init(void *);
247 static void bridge_dummynet(struct mbuf *, struct ifnet *);
248 static void bridge_stop(struct ifnet *, int);
249 static int bridge_transmit(struct ifnet *, struct mbuf *);
250 static void bridge_qflush(struct ifnet *);
251 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
252 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
253 struct rtentry *);
254 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
255 struct mbuf *);
256 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
257
258 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
259 struct mbuf *m);
260
261 static void bridge_timer(void *);
262
263 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
264 struct mbuf *, int);
265 static void bridge_span(struct bridge_softc *, struct mbuf *);
266
267 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
268 uint16_t, struct bridge_iflist *, int, uint8_t);
269 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
270 uint16_t);
271 static void bridge_rttrim(struct bridge_softc *);
272 static void bridge_rtage(struct bridge_softc *);
273 static void bridge_rtflush(struct bridge_softc *, int);
274 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
275 uint16_t);
276
277 static void bridge_rtable_init(struct bridge_softc *);
278 static void bridge_rtable_fini(struct bridge_softc *);
279
280 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
281 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
282 const uint8_t *, uint16_t);
283 static int bridge_rtnode_insert(struct bridge_softc *,
284 struct bridge_rtnode *);
285 static void bridge_rtnode_destroy(struct bridge_softc *,
286 struct bridge_rtnode *);
287 static void bridge_rtable_expire(struct ifnet *, int);
288 static void bridge_state_change(struct ifnet *, int);
289
290 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
291 const char *name);
292 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
293 struct ifnet *ifp);
294 static void bridge_delete_member(struct bridge_softc *,
295 struct bridge_iflist *, int);
296 static void bridge_delete_span(struct bridge_softc *,
297 struct bridge_iflist *);
298
299 static int bridge_ioctl_add(struct bridge_softc *, void *);
300 static int bridge_ioctl_del(struct bridge_softc *, void *);
301 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
302 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
303 static int bridge_ioctl_scache(struct bridge_softc *, void *);
304 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
305 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
306 static int bridge_ioctl_rts(struct bridge_softc *, void *);
307 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
308 static int bridge_ioctl_sto(struct bridge_softc *, void *);
309 static int bridge_ioctl_gto(struct bridge_softc *, void *);
310 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
311 static int bridge_ioctl_flush(struct bridge_softc *, void *);
312 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
313 static int bridge_ioctl_spri(struct bridge_softc *, void *);
314 static int bridge_ioctl_ght(struct bridge_softc *, void *);
315 static int bridge_ioctl_sht(struct bridge_softc *, void *);
316 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
317 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
318 static int bridge_ioctl_gma(struct bridge_softc *, void *);
319 static int bridge_ioctl_sma(struct bridge_softc *, void *);
320 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
321 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
322 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
323 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
324 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
325 static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
326 static int bridge_ioctl_grte(struct bridge_softc *, void *);
327 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
328 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
329 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
330 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
331 int);
332 static int bridge_ip_checkbasic(struct mbuf **mp);
333 #ifdef INET6
334 static int bridge_ip6_checkbasic(struct mbuf **mp);
335 #endif /* INET6 */
336 static int bridge_fragment(struct ifnet *, struct mbuf **mp,
337 struct ether_header *, int, struct llc *);
338 static void bridge_linkstate(struct ifnet *ifp);
339 static void bridge_linkcheck(struct bridge_softc *sc);
340
341 extern void (*bridge_linkstate_p)(struct ifnet *ifp);
342
343 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
344 #define VLANTAGOF(_m) \
345 (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
346
347 static struct bstp_cb_ops bridge_ops = {
348 .bcb_state = bridge_state_change,
349 .bcb_rtage = bridge_rtable_expire
350 };
351
352 SYSCTL_DECL(_net_link);
353 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
354
355 /* only pass IP[46] packets when pfil is enabled */
356 static VNET_DEFINE(int, pfil_onlyip) = 1;
357 #define V_pfil_onlyip VNET(pfil_onlyip)
358 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
359 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
360 "Only pass IP packets when pfil is enabled");
361
362 /* run pfil hooks on the bridge interface */
363 static VNET_DEFINE(int, pfil_bridge) = 1;
364 #define V_pfil_bridge VNET(pfil_bridge)
365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
366 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
367 "Packet filter on the bridge interface");
368
369 /* layer2 filter with ipfw */
370 static VNET_DEFINE(int, pfil_ipfw);
371 #define V_pfil_ipfw VNET(pfil_ipfw)
372
373 /* layer2 ARP filter with ipfw */
374 static VNET_DEFINE(int, pfil_ipfw_arp);
375 #define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
376 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
377 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
378 "Filter ARP packets through IPFW layer2");
379
380 /* run pfil hooks on the member interface */
381 static VNET_DEFINE(int, pfil_member) = 1;
382 #define V_pfil_member VNET(pfil_member)
383 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
384 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
385 "Packet filter on the member interface");
386
387 /* run pfil hooks on the physical interface for locally destined packets */
388 static VNET_DEFINE(int, pfil_local_phys);
389 #define V_pfil_local_phys VNET(pfil_local_phys)
390 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
391 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
392 "Packet filter on the physical interface for locally destined packets");
393
394 /* log STP state changes */
395 static VNET_DEFINE(int, log_stp);
396 #define V_log_stp VNET(log_stp)
397 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
398 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
399 "Log STP state changes");
400
401 /* share MAC with first bridge member */
402 static VNET_DEFINE(int, bridge_inherit_mac);
403 #define V_bridge_inherit_mac VNET(bridge_inherit_mac)
404 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
405 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
406 "Inherit MAC address from the first bridge member");
407
408 static VNET_DEFINE(int, allow_llz_overlap) = 0;
409 #define V_allow_llz_overlap VNET(allow_llz_overlap)
410 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
411 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(allow_llz_overlap), 0,
412 "Allow overlap of link-local scope "
413 "zones of a bridge interface and the member interfaces");
414
415 struct bridge_control {
416 int (*bc_func)(struct bridge_softc *, void *);
417 int bc_argsize;
418 int bc_flags;
419 };
420
421 #define BC_F_COPYIN 0x01 /* copy arguments in */
422 #define BC_F_COPYOUT 0x02 /* copy arguments out */
423 #define BC_F_SUSER 0x04 /* do super-user check */
424
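/*
 * Bridge ioctl dispatch table, indexed by the ifd_cmd value of a
 * SIOCGDRVSPEC/SIOCSDRVSPEC request; the entry order must therefore match
 * the BRDG* command numbers defined in net/if_bridgevar.h.
 */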
425 const struct bridge_control bridge_control_table[] = {
426 { bridge_ioctl_add, sizeof(struct ifbreq),
427 BC_F_COPYIN|BC_F_SUSER },
428 { bridge_ioctl_del, sizeof(struct ifbreq),
429 BC_F_COPYIN|BC_F_SUSER },
430
431 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
432 BC_F_COPYIN|BC_F_COPYOUT },
433 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
434 BC_F_COPYIN|BC_F_SUSER },
435
436 { bridge_ioctl_scache, sizeof(struct ifbrparam),
437 BC_F_COPYIN|BC_F_SUSER },
438 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
439 BC_F_COPYOUT },
440
441 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
442 BC_F_COPYIN|BC_F_COPYOUT },
443 { bridge_ioctl_rts, sizeof(struct ifbaconf),
444 BC_F_COPYIN|BC_F_COPYOUT },
445
446 { bridge_ioctl_saddr, sizeof(struct ifbareq),
447 BC_F_COPYIN|BC_F_SUSER },
448
449 { bridge_ioctl_sto, sizeof(struct ifbrparam),
450 BC_F_COPYIN|BC_F_SUSER },
451 { bridge_ioctl_gto, sizeof(struct ifbrparam),
452 BC_F_COPYOUT },
453
454 { bridge_ioctl_daddr, sizeof(struct ifbareq),
455 BC_F_COPYIN|BC_F_SUSER },
456
457 { bridge_ioctl_flush, sizeof(struct ifbreq),
458 BC_F_COPYIN|BC_F_SUSER },
459
460 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
461 BC_F_COPYOUT },
462 { bridge_ioctl_spri, sizeof(struct ifbrparam),
463 BC_F_COPYIN|BC_F_SUSER },
464
465 { bridge_ioctl_ght, sizeof(struct ifbrparam),
466 BC_F_COPYOUT },
467 { bridge_ioctl_sht, sizeof(struct ifbrparam),
468 BC_F_COPYIN|BC_F_SUSER },
469
470 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
471 BC_F_COPYOUT },
472 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
473 BC_F_COPYIN|BC_F_SUSER },
474
475 { bridge_ioctl_gma, sizeof(struct ifbrparam),
476 BC_F_COPYOUT },
477 { bridge_ioctl_sma, sizeof(struct ifbrparam),
478 BC_F_COPYIN|BC_F_SUSER },
479
480 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
481 BC_F_COPYIN|BC_F_SUSER },
482
483 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
484 BC_F_COPYIN|BC_F_SUSER },
485
486 { bridge_ioctl_addspan, sizeof(struct ifbreq),
487 BC_F_COPYIN|BC_F_SUSER },
488 { bridge_ioctl_delspan, sizeof(struct ifbreq),
489 BC_F_COPYIN|BC_F_SUSER },
490
491 { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
492 BC_F_COPYOUT },
493
494 { bridge_ioctl_grte, sizeof(struct ifbrparam),
495 BC_F_COPYOUT },
496
497 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
498 BC_F_COPYIN|BC_F_COPYOUT },
499
500 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
501 BC_F_COPYIN|BC_F_SUSER },
502
503 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
504 BC_F_COPYIN|BC_F_SUSER },
505
506 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
507 BC_F_COPYIN|BC_F_SUSER },
508
509 };
510 const int bridge_control_table_size = nitems(bridge_control_table);
511
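/* Per-VNET list of all bridge instances, protected by bridge_list_mtx. */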
512 static VNET_DEFINE(LIST_HEAD(, bridge_softc), bridge_list);
513 #define V_bridge_list VNET(bridge_list)
514 #define BRIDGE_LIST_LOCK_INIT(x) mtx_init(&V_bridge_list_mtx, \
515 "if_bridge list", NULL, MTX_DEF)
516 #define BRIDGE_LIST_LOCK_DESTROY(x) mtx_destroy(&V_bridge_list_mtx)
517 #define BRIDGE_LIST_LOCK(x) mtx_lock(&V_bridge_list_mtx)
518 #define BRIDGE_LIST_UNLOCK(x) mtx_unlock(&V_bridge_list_mtx)
519
520 static VNET_DEFINE(struct if_clone *, bridge_cloner);
521 #define V_bridge_cloner VNET(bridge_cloner)
522
523 static const char bridge_name[] = "bridge";
524
525 static void
526 vnet_bridge_init(const void *unused __unused)
527 {
528
529 BRIDGE_LIST_LOCK_INIT();
530 LIST_INIT(&V_bridge_list);
531 V_bridge_cloner = if_clone_simple(bridge_name,
532 bridge_clone_create, bridge_clone_destroy, 0);
533 }
534 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
535 vnet_bridge_init, NULL);
536
537 static void
538 vnet_bridge_uninit(const void *unused __unused)
539 {
540
541 if_clone_detach(V_bridge_cloner);
542 V_bridge_cloner = NULL;
543 BRIDGE_LIST_LOCK_DESTROY();
544 }
545 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
546 vnet_bridge_uninit, NULL);
547
548 static int
549 bridge_modevent(module_t mod, int type, void *data)
550 {
551
552 switch (type) {
553 case MOD_LOAD:
554 bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
555 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
556 UMA_ALIGN_PTR, 0);
557 bridge_input_p = bridge_input;
558 bridge_output_p = bridge_output;
559 bridge_dn_p = bridge_dummynet;
560 bridge_linkstate_p = bridge_linkstate;
561 bridge_detach_cookie = EVENTHANDLER_REGISTER(
562 ifnet_departure_event, bridge_ifdetach, NULL,
563 EVENTHANDLER_PRI_ANY);
564 break;
565 case MOD_UNLOAD:
566 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
567 bridge_detach_cookie);
568 uma_zdestroy(bridge_rtnode_zone);
569 bridge_input_p = NULL;
570 bridge_output_p = NULL;
571 bridge_dn_p = NULL;
572 bridge_linkstate_p = NULL;
573 break;
574 default:
575 return (EOPNOTSUPP);
576 }
577 return (0);
578 }
579
580 static moduledata_t bridge_mod = {
581 "if_bridge",
582 bridge_modevent,
583 0
584 };
585
586 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
587 MODULE_VERSION(if_bridge, 1);
588 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
589
590 /*
591 * handler for net.link.bridge.ipfw
592 */
593 static int
594 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
595 {
596 int enable = V_pfil_ipfw;
597 int error;
598
599 error = sysctl_handle_int(oidp, &enable, 0, req);
600 enable &= 1;
601
602 if (enable != V_pfil_ipfw) {
603 V_pfil_ipfw = enable;
604
605 /*
606 * Disable pfil so that ipfw doesn't run twice; if the user
607 * really wants both, they can re-enable pfil_bridge and/or
608 * pfil_member. Also allow non-IP packets, as ipfw can filter by
609 * layer2 type.
610 */
611 if (V_pfil_ipfw) {
612 V_pfil_onlyip = 0;
613 V_pfil_bridge = 0;
614 V_pfil_member = 0;
615 }
616 }
617
618 return (error);
619 }
620 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
621 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
622 &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
623 "Layer2 filter with IPFW");
624
625 /*
626 * bridge_clone_create:
627 *
628 * Create a new bridge instance.
629 */
630 static int
631 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
632 {
633 struct bridge_softc *sc, *sc2;
634 struct ifnet *bifp, *ifp;
635 int fb, retry;
636 unsigned long hostid;
637
638 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
639 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
640 if (ifp == NULL) {
641 free(sc, M_DEVBUF);
642 return (ENOSPC);
643 }
644
645 BRIDGE_LOCK_INIT(sc);
646 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
647 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
648
649 /* Initialize our routing table. */
650 bridge_rtable_init(sc);
651
652 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
653
654 LIST_INIT(&sc->sc_iflist);
655 LIST_INIT(&sc->sc_spanlist);
656
657 ifp->if_softc = sc;
658 if_initname(ifp, bridge_name, unit);
659 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
660 ifp->if_ioctl = bridge_ioctl;
661 ifp->if_transmit = bridge_transmit;
662 ifp->if_qflush = bridge_qflush;
663 ifp->if_init = bridge_init;
664 ifp->if_type = IFT_BRIDGE;
665
666 /*
667 * Generate an ethernet address with a locally administered address.
668 *
669 * Since we are using random ethernet addresses for the bridge, it is
670 * possible that we might have address collisions, so make sure that
671 * this hardware address isn't already in use on another bridge.
672 * The first try uses the hostid and falls back to arc4rand().
673 */
674 fb = 0;
675 getcredhostid(curthread->td_ucred, &hostid);
676 do {
677 if (fb || hostid == 0) {
678 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1);
679 sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
680 sc->sc_defaddr[0] |= 2; /* set the LAA bit */
681 } else {
682 sc->sc_defaddr[0] = 0x2;
683 sc->sc_defaddr[1] = (hostid >> 24) & 0xff;
684 sc->sc_defaddr[2] = (hostid >> 16) & 0xff;
685 sc->sc_defaddr[3] = (hostid >> 8 ) & 0xff;
686 sc->sc_defaddr[4] = hostid & 0xff;
687 sc->sc_defaddr[5] = ifp->if_dunit & 0xff;
688 }
689
690 fb = 1;
691 retry = 0;
692 BRIDGE_LIST_LOCK();
693 LIST_FOREACH(sc2, &V_bridge_list, sc_list) {
694 bifp = sc2->sc_ifp;
695 if (memcmp(sc->sc_defaddr,
696 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
697 retry = 1;
698 break;
699 }
700 }
701 BRIDGE_LIST_UNLOCK();
702 } while (retry == 1);
703
704 bstp_attach(&sc->sc_stp, &bridge_ops);
705 ether_ifattach(ifp, sc->sc_defaddr);
706 /* Now undo some of the damage... */
707 ifp->if_baudrate = 0;
708 ifp->if_type = IFT_BRIDGE;
709
710 BRIDGE_LIST_LOCK();
711 LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
712 BRIDGE_LIST_UNLOCK();
713
714 return (0);
715 }
716
717 /*
718 * bridge_clone_destroy:
719 *
720 * Destroy a bridge instance.
721 */
722 static void
723 bridge_clone_destroy(struct ifnet *ifp)
724 {
725 struct bridge_softc *sc = ifp->if_softc;
726 struct bridge_iflist *bif;
727
728 BRIDGE_LOCK(sc);
729
730 bridge_stop(ifp, 1);
731 ifp->if_flags &= ~IFF_UP;
732
733 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
734 bridge_delete_member(sc, bif, 0);
735
736 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
737 bridge_delete_span(sc, bif);
738 }
739
740 BRIDGE_UNLOCK(sc);
741
742 callout_drain(&sc->sc_brcallout);
743
744 BRIDGE_LIST_LOCK();
745 LIST_REMOVE(sc, sc_list);
746 BRIDGE_LIST_UNLOCK();
747
748 bstp_detach(&sc->sc_stp);
749 ether_ifdetach(ifp);
750 if_free(ifp);
751
752 /* Tear down the routing table. */
753 bridge_rtable_fini(sc);
754
755 BRIDGE_LOCK_DESTROY(sc);
756 free(sc, M_DEVBUF);
757 }
758
759 /*
760 * bridge_ioctl:
761 *
762 * Handle a control request from the operator.
763 */
764 static int
765 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
766 {
767 struct bridge_softc *sc = ifp->if_softc;
768 struct ifreq *ifr = (struct ifreq *)data;
769 struct bridge_iflist *bif;
770 struct thread *td = curthread;
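/*
 * Scratch buffer for ioctl arguments; ifd_len is later checked against
 * sizeof(args), so it must be large enough for every entry in
 * bridge_control_table.
 */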
771 union {
772 struct ifbreq ifbreq;
773 struct ifbifconf ifbifconf;
774 struct ifbareq ifbareq;
775 struct ifbaconf ifbaconf;
776 struct ifbrparam ifbrparam;
777 struct ifbropreq ifbropreq;
778 } args;
779 struct ifdrv *ifd = (struct ifdrv *) data;
780 const struct bridge_control *bc;
781 int error = 0;
782
783 switch (cmd) {
784
785 case SIOCADDMULTI:
786 case SIOCDELMULTI:
787 break;
788
789 case SIOCGDRVSPEC:
790 case SIOCSDRVSPEC:
791 if (ifd->ifd_cmd >= bridge_control_table_size) {
792 error = EINVAL;
793 break;
794 }
795 bc = &bridge_control_table[ifd->ifd_cmd];
796
797 if (cmd == SIOCGDRVSPEC &&
798 (bc->bc_flags & BC_F_COPYOUT) == 0) {
799 error = EINVAL;
800 break;
801 }
802 else if (cmd == SIOCSDRVSPEC &&
803 (bc->bc_flags & BC_F_COPYOUT) != 0) {
804 error = EINVAL;
805 break;
806 }
807
808 if (bc->bc_flags & BC_F_SUSER) {
809 error = priv_check(td, PRIV_NET_BRIDGE);
810 if (error)
811 break;
812 }
813
814 if (ifd->ifd_len != bc->bc_argsize ||
815 ifd->ifd_len > sizeof(args)) {
816 error = EINVAL;
817 break;
818 }
819
820 bzero(&args, sizeof(args));
821 if (bc->bc_flags & BC_F_COPYIN) {
822 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
823 if (error)
824 break;
825 }
826
827 BRIDGE_LOCK(sc);
828 error = (*bc->bc_func)(sc, &args);
829 BRIDGE_UNLOCK(sc);
830 if (error)
831 break;
832
833 if (bc->bc_flags & BC_F_COPYOUT)
834 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
835
836 break;
837
838 case SIOCSIFFLAGS:
839 if (!(ifp->if_flags & IFF_UP) &&
840 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
841 /*
842 * If interface is marked down and it is running,
843 * then stop and disable it.
844 */
845 BRIDGE_LOCK(sc);
846 bridge_stop(ifp, 1);
847 BRIDGE_UNLOCK(sc);
848 } else if ((ifp->if_flags & IFF_UP) &&
849 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
850 /*
851 * If interface is marked up and it is stopped, then
852 * start it.
853 */
854 (*ifp->if_init)(sc);
855 }
856 break;
857
858 case SIOCSIFMTU:
859 if (ifr->ifr_mtu < 576) {
860 error = EINVAL;
861 break;
862 }
863 if (LIST_EMPTY(&sc->sc_iflist)) {
864 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
865 break;
866 }
867 BRIDGE_LOCK(sc);
868 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
869 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
870 log(LOG_NOTICE, "%s: invalid MTU: %u(%s)"
871 " != %d\n", sc->sc_ifp->if_xname,
872 bif->bif_ifp->if_mtu,
873 bif->bif_ifp->if_xname, ifr->ifr_mtu);
874 error = EINVAL;
875 break;
876 }
877 }
878 if (!error)
879 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
880 BRIDGE_UNLOCK(sc);
881 break;
882 default:
883 /*
884 * drop the lock as ether_ioctl() will call bridge_start() and
885 * cause the lock to be recursed.
886 */
887 error = ether_ioctl(ifp, cmd, data);
888 break;
889 }
890
891 return (error);
892 }
893
894 /*
895 * bridge_mutecaps:
896 *
897 * Clear or restore unwanted capabilities on the member interface
898 */
899 static void
900 bridge_mutecaps(struct bridge_softc *sc)
901 {
902 struct bridge_iflist *bif;
903 int enabled, mask;
904
905 /* Initial bitmask of capabilities to test */
906 mask = BRIDGE_IFCAPS_MASK;
907
908 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
909 /* Every member must support it or it's disabled */
910 mask &= bif->bif_savedcaps;
911 }
912
913 BRIDGE_XLOCK(sc);
914 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
915 enabled = bif->bif_ifp->if_capenable;
916 enabled &= ~BRIDGE_IFCAPS_STRIP;
917 /* strip off mask bits and enable them again if allowed */
918 enabled &= ~BRIDGE_IFCAPS_MASK;
919 enabled |= mask;
920 BRIDGE_UNLOCK(sc);
921 bridge_set_ifcap(sc, bif, enabled);
922 BRIDGE_LOCK(sc);
923 }
924 BRIDGE_XDROP(sc);
925
926 }
927
928 static void
929 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
930 {
931 struct ifnet *ifp = bif->bif_ifp;
932 struct ifreq ifr;
933 int error;
934
935 BRIDGE_UNLOCK_ASSERT(sc);
936
937 bzero(&ifr, sizeof(ifr));
938 ifr.ifr_reqcap = set;
939
940 if (ifp->if_capenable != set) {
941 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
942 if (error)
943 if_printf(sc->sc_ifp,
944 "error setting interface capabilities on %s\n",
945 ifp->if_xname);
946 }
947 }
948
949 /*
950 * bridge_lookup_member:
951 *
952 * Lookup a bridge member interface.
953 */
954 static struct bridge_iflist *
955 bridge_lookup_member(struct bridge_softc *sc, const char *name)
956 {
957 struct bridge_iflist *bif;
958 struct ifnet *ifp;
959
960 BRIDGE_LOCK_ASSERT(sc);
961
962 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
963 ifp = bif->bif_ifp;
964 if (strcmp(ifp->if_xname, name) == 0)
965 return (bif);
966 }
967
968 return (NULL);
969 }
970
971 /*
972 * bridge_lookup_member_if:
973 *
974 * Lookup a bridge member interface by ifnet*.
975 */
976 static struct bridge_iflist *
977 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
978 {
979 struct bridge_iflist *bif;
980
981 BRIDGE_LOCK_ASSERT(sc);
982
983 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
984 if (bif->bif_ifp == member_ifp)
985 return (bif);
986 }
987
988 return (NULL);
989 }
990
991 /*
992 * bridge_delete_member:
993 *
994 * Delete the specified member interface.
995 */
996 static void
997 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
998 int gone)
999 {
1000 struct ifnet *ifs = bif->bif_ifp;
1001 struct ifnet *fif = NULL;
1002
1003 BRIDGE_LOCK_ASSERT(sc);
1004
1005 if (bif->bif_flags & IFBIF_STP)
1006 bstp_disable(&bif->bif_stp);
1007
1008 ifs->if_bridge = NULL;
1009 BRIDGE_XLOCK(sc);
1010 LIST_REMOVE(bif, bif_next);
1011 BRIDGE_XDROP(sc);
1012
1013 /*
1014 * If removing the interface that gave the bridge its mac address, set
1015 * the mac address of the bridge to the address of the next member, or
1016 * to its default address if no members are left.
1017 */
1018 if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1019 if (LIST_EMPTY(&sc->sc_iflist)) {
1020 bcopy(sc->sc_defaddr,
1021 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1022 sc->sc_ifaddr = NULL;
1023 } else {
1024 fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
1025 bcopy(IF_LLADDR(fif),
1026 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1027 sc->sc_ifaddr = fif;
1028 }
1029 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1030 }
1031
1032 bridge_linkcheck(sc);
1033 bridge_mutecaps(sc); /* recalculate now that this interface is removed */
1034 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1035 KASSERT(bif->bif_addrcnt == 0,
1036 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1037
1038 BRIDGE_UNLOCK(sc);
1039 if (!gone) {
1040 switch (ifs->if_type) {
1041 case IFT_ETHER:
1042 case IFT_L2VLAN:
1043 /*
1044 * Take the interface out of promiscuous mode, but only
1045 * if it was promiscuous in the first place. It might
1046 * not be if we're in the bridge_ioctl_add() error path.
1047 */
1048 if (ifs->if_flags & IFF_PROMISC)
1049 (void) ifpromisc(ifs, 0);
1050 break;
1051
1052 case IFT_GIF:
1053 break;
1054
1055 default:
1056 #ifdef DIAGNOSTIC
1057 panic("bridge_delete_member: impossible");
1058 #endif
1059 break;
1060 }
1061 /* re-enable any interface capabilities */
1062 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1063 }
1064 bstp_destroy(&bif->bif_stp); /* prepare to free */
1065 BRIDGE_LOCK(sc);
1066 free(bif, M_DEVBUF);
1067 }
1068
1069 /*
1070 * bridge_delete_span:
1071 *
1072 * Delete the specified span interface.
1073 */
1074 static void
1075 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1076 {
1077 BRIDGE_LOCK_ASSERT(sc);
1078
1079 KASSERT(bif->bif_ifp->if_bridge == NULL,
1080 ("%s: not a span interface", __func__));
1081
1082 LIST_REMOVE(bif, bif_next);
1083 free(bif, M_DEVBUF);
1084 }
1085
1086 static int
1087 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1088 {
1089 struct ifbreq *req = arg;
1090 struct bridge_iflist *bif = NULL;
1091 struct ifnet *ifs;
1092 int error = 0;
1093
1094 ifs = ifunit(req->ifbr_ifsname);
1095 if (ifs == NULL)
1096 return (ENOENT);
1097 if (ifs->if_ioctl == NULL) /* must be supported */
1098 return (EINVAL);
1099
1100 /* If it's in the span list, it can't be a member. */
1101 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1102 if (ifs == bif->bif_ifp)
1103 return (EBUSY);
1104
1105 if (ifs->if_bridge == sc)
1106 return (EEXIST);
1107
1108 if (ifs->if_bridge != NULL)
1109 return (EBUSY);
1110
1111 switch (ifs->if_type) {
1112 case IFT_ETHER:
1113 case IFT_L2VLAN:
1114 case IFT_GIF:
1115 /* permitted interface types */
1116 break;
1117 default:
1118 return (EINVAL);
1119 }
1120
1121 #ifdef INET6
1122 /*
1123 * Two valid inet6 addresses with link-local scope must not be
1124 * on the parent interface and the member interfaces at the
1125 * same time. This restriction is needed to prevent violation
1126 * of link-local scope zone. Attempts to add a member
1127 * interface which has inet6 addresses when the parent has
1128 * inet6 triggers removal of all inet6 addresses on the member
1129 * interface.
1130 */
1131
1132 /* Check if the parent interface has a link-local scope addr. */
1133 if (V_allow_llz_overlap == 0 &&
1134 in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1135 /*
1136 * If any, remove all inet6 addresses from the member
1137 * interfaces.
1138 */
1139 BRIDGE_XLOCK(sc);
1140 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1141 if (in6ifa_llaonifp(bif->bif_ifp)) {
1142 BRIDGE_UNLOCK(sc);
1143 in6_ifdetach(bif->bif_ifp);
1144 BRIDGE_LOCK(sc);
1145 if_printf(sc->sc_ifp,
1146 "IPv6 addresses on %s have been removed "
1147 "before adding it as a member to prevent "
1148 "IPv6 address scope violation.\n",
1149 bif->bif_ifp->if_xname);
1150 }
1151 }
1152 BRIDGE_XDROP(sc);
1153 if (in6ifa_llaonifp(ifs)) {
1154 BRIDGE_UNLOCK(sc);
1155 in6_ifdetach(ifs);
1156 BRIDGE_LOCK(sc);
1157 if_printf(sc->sc_ifp,
1158 "IPv6 addresses on %s have been removed "
1159 "before adding it as a member to prevent "
1160 "IPv6 address scope violation.\n",
1161 ifs->if_xname);
1162 }
1163 }
1164 #endif
1165 /* Allow the first Ethernet member to define the MTU */
1166 if (LIST_EMPTY(&sc->sc_iflist))
1167 sc->sc_ifp->if_mtu = ifs->if_mtu;
1168 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1169 if_printf(sc->sc_ifp, "invalid MTU: %u(%s) != %u\n",
1170 ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
1171 return (EINVAL);
1172 }
1173
1174 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1175 if (bif == NULL)
1176 return (ENOMEM);
1177
1178 bif->bif_ifp = ifs;
1179 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1180 bif->bif_savedcaps = ifs->if_capenable;
1181
1182 /*
1183 * Assign the interface's MAC address to the bridge if it's the first
1184 * member and the MAC address of the bridge has not been changed from
1185 * the default randomly generated one.
1186 */
1187 if (V_bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) &&
1188 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
1189 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1190 sc->sc_ifaddr = ifs;
1191 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1192 }
1193
1194 ifs->if_bridge = sc;
1195 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1196 /*
1197 * XXX: XLOCK HERE!?!
1198 *
1199 * NOTE: insert_***HEAD*** should be safe for the traversals.
1200 */
1201 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1202
1203 /* Set interface capabilities to the intersection set of all members */
1204 bridge_mutecaps(sc);
1205 bridge_linkcheck(sc);
1206
1207 /* Place the interface into promiscuous mode */
1208 switch (ifs->if_type) {
1209 case IFT_ETHER:
1210 case IFT_L2VLAN:
1211 BRIDGE_UNLOCK(sc);
1212 error = ifpromisc(ifs, 1);
1213 BRIDGE_LOCK(sc);
1214 break;
1215 }
1216
1217 if (error)
1218 bridge_delete_member(sc, bif, 0);
1219 return (error);
1220 }
1221
1222 static int
1223 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1224 {
1225 struct ifbreq *req = arg;
1226 struct bridge_iflist *bif;
1227
1228 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1229 if (bif == NULL)
1230 return (ENOENT);
1231
1232 bridge_delete_member(sc, bif, 0);
1233
1234 return (0);
1235 }
1236
1237 static int
1238 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1239 {
1240 struct ifbreq *req = arg;
1241 struct bridge_iflist *bif;
1242 struct bstp_port *bp;
1243
1244 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1245 if (bif == NULL)
1246 return (ENOENT);
1247
1248 bp = &bif->bif_stp;
1249 req->ifbr_ifsflags = bif->bif_flags;
1250 req->ifbr_state = bp->bp_state;
1251 req->ifbr_priority = bp->bp_priority;
1252 req->ifbr_path_cost = bp->bp_path_cost;
1253 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1254 req->ifbr_proto = bp->bp_protover;
1255 req->ifbr_role = bp->bp_role;
1256 req->ifbr_stpflags = bp->bp_flags;
1257 req->ifbr_addrcnt = bif->bif_addrcnt;
1258 req->ifbr_addrmax = bif->bif_addrmax;
1259 req->ifbr_addrexceeded = bif->bif_addrexceeded;
1260
1261 /* Copy STP state options as flags */
1262 if (bp->bp_operedge)
1263 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1264 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1265 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1266 if (bp->bp_ptp_link)
1267 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1268 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1269 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1270 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1271 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1272 if (bp->bp_flags & BSTP_PORT_ADMCOST)
1273 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1274 return (0);
1275 }
1276
1277 static int
1278 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1279 {
1280 struct ifbreq *req = arg;
1281 struct bridge_iflist *bif;
1282 struct bstp_port *bp;
1283 int error;
1284
1285 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1286 if (bif == NULL)
1287 return (ENOENT);
1288 bp = &bif->bif_stp;
1289
1290 if (req->ifbr_ifsflags & IFBIF_SPAN)
1291 /* SPAN is readonly */
1292 return (EINVAL);
1293
1294 if (req->ifbr_ifsflags & IFBIF_STP) {
1295 if ((bif->bif_flags & IFBIF_STP) == 0) {
1296 error = bstp_enable(&bif->bif_stp);
1297 if (error)
1298 return (error);
1299 }
1300 } else {
1301 if ((bif->bif_flags & IFBIF_STP) != 0)
1302 bstp_disable(&bif->bif_stp);
1303 }
1304
1305 /* Pass on STP flags */
1306 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1307 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1308 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1309 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1310
1311 /* Save the bits relating to the bridge */
1312 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1313
1314 return (0);
1315 }
1316
1317 static int
1318 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1319 {
1320 struct ifbrparam *param = arg;
1321
1322 sc->sc_brtmax = param->ifbrp_csize;
1323 bridge_rttrim(sc);
1324
1325 return (0);
1326 }
1327
1328 static int
1329 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1330 {
1331 struct ifbrparam *param = arg;
1332
1333 param->ifbrp_csize = sc->sc_brtmax;
1334
1335 return (0);
1336 }
1337
1338 static int
1339 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1340 {
1341 struct ifbifconf *bifc = arg;
1342 struct bridge_iflist *bif;
1343 struct ifbreq breq;
1344 char *buf, *outbuf;
1345 int count, buflen, len, error = 0;
1346
1347 count = 0;
1348 LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1349 count++;
1350 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1351 count++;
1352
1353 buflen = sizeof(breq) * count;
1354 if (bifc->ifbic_len == 0) {
1355 bifc->ifbic_len = buflen;
1356 return (0);
1357 }
1358 BRIDGE_UNLOCK(sc);
1359 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1360 BRIDGE_LOCK(sc);
1361
1362 count = 0;
1363 buf = outbuf;
1364 len = min(bifc->ifbic_len, buflen);
1365 bzero(&breq, sizeof(breq));
1366 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1367 if (len < sizeof(breq))
1368 break;
1369
1370 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1371 sizeof(breq.ifbr_ifsname));
1372 /* Fill in the ifbreq structure */
1373 error = bridge_ioctl_gifflags(sc, &breq);
1374 if (error)
1375 break;
1376 memcpy(buf, &breq, sizeof(breq));
1377 count++;
1378 buf += sizeof(breq);
1379 len -= sizeof(breq);
1380 }
1381 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1382 if (len < sizeof(breq))
1383 break;
1384
1385 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1386 sizeof(breq.ifbr_ifsname));
1387 breq.ifbr_ifsflags = bif->bif_flags;
1388 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1389 memcpy(buf, &breq, sizeof(breq));
1390 count++;
1391 buf += sizeof(breq);
1392 len -= sizeof(breq);
1393 }
1394
1395 BRIDGE_UNLOCK(sc);
1396 bifc->ifbic_len = sizeof(breq) * count;
1397 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1398 BRIDGE_LOCK(sc);
1399 free(outbuf, M_TEMP);
1400 return (error);
1401 }
1402
1403 static int
1404 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1405 {
1406 struct ifbaconf *bac = arg;
1407 struct bridge_rtnode *brt;
1408 struct ifbareq bareq;
1409 char *buf, *outbuf;
1410 int count, buflen, len, error = 0;
1411
1412 if (bac->ifbac_len == 0)
1413 return (0);
1414
1415 count = 0;
1416 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1417 count++;
1418 buflen = sizeof(bareq) * count;
1419
1420 BRIDGE_UNLOCK(sc);
1421 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1422 BRIDGE_LOCK(sc);
1423
1424 count = 0;
1425 buf = outbuf;
1426 len = min(bac->ifbac_len, buflen);
1427 bzero(&bareq, sizeof(bareq));
1428 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1429 if (len < sizeof(bareq))
1430 goto out;
1431 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1432 sizeof(bareq.ifba_ifsname));
1433 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1434 bareq.ifba_vlan = brt->brt_vlan;
1435 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1436 time_uptime < brt->brt_expire)
1437 bareq.ifba_expire = brt->brt_expire - time_uptime;
1438 else
1439 bareq.ifba_expire = 0;
1440 bareq.ifba_flags = brt->brt_flags;
1441
1442 memcpy(buf, &bareq, sizeof(bareq));
1443 count++;
1444 buf += sizeof(bareq);
1445 len -= sizeof(bareq);
1446 }
1447 out:
1448 BRIDGE_UNLOCK(sc);
1449 bac->ifbac_len = sizeof(bareq) * count;
1450 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1451 BRIDGE_LOCK(sc);
1452 free(outbuf, M_TEMP);
1453 return (error);
1454 }
1455
1456 static int
1457 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1458 {
1459 struct ifbareq *req = arg;
1460 struct bridge_iflist *bif;
1461 int error;
1462
1463 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1464 if (bif == NULL)
1465 return (ENOENT);
1466
1467 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1468 req->ifba_flags);
1469
1470 return (error);
1471 }
1472
1473 static int
1474 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1475 {
1476 struct ifbrparam *param = arg;
1477
1478 sc->sc_brttimeout = param->ifbrp_ctime;
1479 return (0);
1480 }
1481
1482 static int
1483 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1484 {
1485 struct ifbrparam *param = arg;
1486
1487 param->ifbrp_ctime = sc->sc_brttimeout;
1488 return (0);
1489 }
1490
1491 static int
1492 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1493 {
1494 struct ifbareq *req = arg;
1495
1496 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1497 }
1498
1499 static int
1500 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1501 {
1502 struct ifbreq *req = arg;
1503
1504 bridge_rtflush(sc, req->ifbr_ifsflags);
1505 return (0);
1506 }
1507
1508 static int
1509 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1510 {
1511 struct ifbrparam *param = arg;
1512 struct bstp_state *bs = &sc->sc_stp;
1513
1514 param->ifbrp_prio = bs->bs_bridge_priority;
1515 return (0);
1516 }
1517
1518 static int
1519 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1520 {
1521 struct ifbrparam *param = arg;
1522
1523 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1524 }
1525
1526 static int
1527 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1528 {
1529 struct ifbrparam *param = arg;
1530 struct bstp_state *bs = &sc->sc_stp;
1531
1532 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1533 return (0);
1534 }
1535
1536 static int
1537 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1538 {
1539 struct ifbrparam *param = arg;
1540
1541 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1542 }
1543
1544 static int
1545 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1546 {
1547 struct ifbrparam *param = arg;
1548 struct bstp_state *bs = &sc->sc_stp;
1549
1550 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1551 return (0);
1552 }
1553
1554 static int
1555 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1556 {
1557 struct ifbrparam *param = arg;
1558
1559 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1560 }
1561
1562 static int
1563 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1564 {
1565 struct ifbrparam *param = arg;
1566 struct bstp_state *bs = &sc->sc_stp;
1567
1568 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1569 return (0);
1570 }
1571
1572 static int
1573 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1574 {
1575 struct ifbrparam *param = arg;
1576
1577 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1578 }
1579
1580 static int
1581 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1582 {
1583 struct ifbreq *req = arg;
1584 struct bridge_iflist *bif;
1585
1586 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1587 if (bif == NULL)
1588 return (ENOENT);
1589
1590 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1591 }
1592
1593 static int
1594 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1595 {
1596 struct ifbreq *req = arg;
1597 struct bridge_iflist *bif;
1598
1599 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1600 if (bif == NULL)
1601 return (ENOENT);
1602
1603 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1604 }
1605
1606 static int
1607 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1608 {
1609 struct ifbreq *req = arg;
1610 struct bridge_iflist *bif;
1611
1612 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1613 if (bif == NULL)
1614 return (ENOENT);
1615
1616 bif->bif_addrmax = req->ifbr_addrmax;
1617 return (0);
1618 }
1619
1620 static int
1621 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1622 {
1623 struct ifbreq *req = arg;
1624 struct bridge_iflist *bif = NULL;
1625 struct ifnet *ifs;
1626
1627 ifs = ifunit(req->ifbr_ifsname);
1628 if (ifs == NULL)
1629 return (ENOENT);
1630
1631 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1632 if (ifs == bif->bif_ifp)
1633 return (EBUSY);
1634
1635 if (ifs->if_bridge != NULL)
1636 return (EBUSY);
1637
1638 switch (ifs->if_type) {
1639 case IFT_ETHER:
1640 case IFT_GIF:
1641 case IFT_L2VLAN:
1642 break;
1643 default:
1644 return (EINVAL);
1645 }
1646
1647 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1648 if (bif == NULL)
1649 return (ENOMEM);
1650
1651 bif->bif_ifp = ifs;
1652 bif->bif_flags = IFBIF_SPAN;
1653
1654 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1655
1656 return (0);
1657 }
1658
1659 static int
1660 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1661 {
1662 struct ifbreq *req = arg;
1663 struct bridge_iflist *bif;
1664 struct ifnet *ifs;
1665
1666 ifs = ifunit(req->ifbr_ifsname);
1667 if (ifs == NULL)
1668 return (ENOENT);
1669
1670 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1671 if (ifs == bif->bif_ifp)
1672 break;
1673
1674 if (bif == NULL)
1675 return (ENOENT);
1676
1677 bridge_delete_span(sc, bif);
1678
1679 return (0);
1680 }
1681
1682 static int
1683 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1684 {
1685 struct ifbropreq *req = arg;
1686 struct bstp_state *bs = &sc->sc_stp;
1687 struct bstp_port *root_port;
1688
1689 req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1690 req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1691 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1692
1693 root_port = bs->bs_root_port;
1694 if (root_port == NULL)
1695 req->ifbop_root_port = 0;
1696 else
1697 req->ifbop_root_port = root_port->bp_ifp->if_index;
1698
1699 req->ifbop_holdcount = bs->bs_txholdcount;
1700 req->ifbop_priority = bs->bs_bridge_priority;
1701 req->ifbop_protocol = bs->bs_protover;
1702 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1703 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1704 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1705 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1706 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1707 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1708
1709 return (0);
1710 }
1711
1712 static int
1713 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1714 {
1715 struct ifbrparam *param = arg;
1716
1717 param->ifbrp_cexceeded = sc->sc_brtexceeded;
1718 return (0);
1719 }
1720
1721 static int
1722 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1723 {
1724 struct ifbpstpconf *bifstp = arg;
1725 struct bridge_iflist *bif;
1726 struct bstp_port *bp;
1727 struct ifbpstpreq bpreq;
1728 char *buf, *outbuf;
1729 int count, buflen, len, error = 0;
1730
1731 count = 0;
1732 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1733 if ((bif->bif_flags & IFBIF_STP) != 0)
1734 count++;
1735 }
1736
1737 buflen = sizeof(bpreq) * count;
1738 if (bifstp->ifbpstp_len == 0) {
1739 bifstp->ifbpstp_len = buflen;
1740 return (0);
1741 }
1742
1743 BRIDGE_UNLOCK(sc);
1744 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1745 BRIDGE_LOCK(sc);
1746
1747 count = 0;
1748 buf = outbuf;
1749 len = min(bifstp->ifbpstp_len, buflen);
1750 bzero(&bpreq, sizeof(bpreq));
1751 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1752 if (len < sizeof(bpreq))
1753 break;
1754
1755 if ((bif->bif_flags & IFBIF_STP) == 0)
1756 continue;
1757
1758 bp = &bif->bif_stp;
1759 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1760 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1761 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1762 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1763 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1764 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1765
1766 memcpy(buf, &bpreq, sizeof(bpreq));
1767 count++;
1768 buf += sizeof(bpreq);
1769 len -= sizeof(bpreq);
1770 }
1771
1772 BRIDGE_UNLOCK(sc);
1773 bifstp->ifbpstp_len = sizeof(bpreq) * count;
1774 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1775 BRIDGE_LOCK(sc);
1776 free(outbuf, M_TEMP);
1777 return (error);
1778 }
1779
1780 static int
1781 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1782 {
1783 struct ifbrparam *param = arg;
1784
1785 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1786 }
1787
1788 static int
1789 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1790 {
1791 struct ifbrparam *param = arg;
1792
1793 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1794 }
1795
1796 /*
1797 * bridge_ifdetach:
1798 *
1799 * Detach an interface from a bridge. Called when a member
1800 * interface is detaching.
1801 */
1802 static void
1803 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1804 {
1805 struct bridge_softc *sc = ifp->if_bridge;
1806 struct bridge_iflist *bif;
1807
1808 if (ifp->if_flags & IFF_RENAMING)
1809 return;
1810 if (V_bridge_cloner == NULL) {
1811 /*
1812 * This detach handler can be called after
1813 * vnet_bridge_uninit(). Just return in that case.
1814 */
1815 return;
1816 }
1817 /* Check if the interface is a bridge member */
1818 if (sc != NULL) {
1819 BRIDGE_LOCK(sc);
1820
1821 bif = bridge_lookup_member_if(sc, ifp);
1822 if (bif != NULL)
1823 bridge_delete_member(sc, bif, 1);
1824
1825 BRIDGE_UNLOCK(sc);
1826 return;
1827 }
1828
1829 /* Check if the interface is a span port */
1830 BRIDGE_LIST_LOCK();
1831 LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1832 BRIDGE_LOCK(sc);
1833 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1834 if (ifp == bif->bif_ifp) {
1835 bridge_delete_span(sc, bif);
1836 break;
1837 }
1838
1839 BRIDGE_UNLOCK(sc);
1840 }
1841 BRIDGE_LIST_UNLOCK();
1842 }
1843
1844 /*
1845 * bridge_init:
1846 *
1847 * Initialize a bridge interface.
1848 */
1849 static void
1850 bridge_init(void *xsc)
1851 {
1852 struct bridge_softc *sc = (struct bridge_softc *)xsc;
1853 struct ifnet *ifp = sc->sc_ifp;
1854
1855 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1856 return;
1857
1858 BRIDGE_LOCK(sc);
1859 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1860 bridge_timer, sc);
1861
1862 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1863 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
1864
1865 BRIDGE_UNLOCK(sc);
1866 }
1867
1868 /*
1869 * bridge_stop:
1870 *
1871 * Stop the bridge interface.
1872 */
1873 static void
1874 bridge_stop(struct ifnet *ifp, int disable)
1875 {
1876 struct bridge_softc *sc = ifp->if_softc;
1877
1878 BRIDGE_LOCK_ASSERT(sc);
1879
1880 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1881 return;
1882
1883 callout_stop(&sc->sc_brcallout);
1884 bstp_stop(&sc->sc_stp);
1885
1886 bridge_rtflush(sc, IFBF_FLUSHDYN);
1887
1888 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1889 }
1890
1891 /*
1892 * bridge_enqueue:
1893 *
1894 * Enqueue a packet on a bridge member interface.
1895 *
1896 */
1897 static int
1898 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1899 {
1900 int len, err = 0;
1901 short mflags;
1902 struct mbuf *m0;
1903
1904 /* We may be sending a list of fragments, so traverse the packet chain */
1905 for (; m; m = m0) {
1906 m0 = m->m_nextpkt;
1907 m->m_nextpkt = NULL;
1908 len = m->m_pkthdr.len;
1909 mflags = m->m_flags;
1910
1911 /*
1912 * If the underlying interface cannot do VLAN tag insertion itself,
1913 * then attach a packet tag that holds it.
1914 */
1915 if ((m->m_flags & M_VLANTAG) &&
1916 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
1917 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1918 if (m == NULL) {
1919 if_printf(dst_ifp,
1920 "unable to prepend VLAN header\n");
1921 if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
1922 continue;
1923 }
1924 m->m_flags &= ~M_VLANTAG;
1925 }
1926
1927 M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
1928 if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
1929 m_freem(m0);
1930 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1931 break;
1932 }
1933
1934 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1935 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
1936 if (mflags & M_MCAST)
1937 if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
1938 }
1939
1940 return (err);
1941 }
1942
1943 /*
1944 * bridge_dummynet:
1945 *
1946 * Receive a queued packet from dummynet and pass it on to the output
1947 * interface.
1948 *
1949 * The mbuf has the Ethernet header already attached.
1950 */
1951 static void
1952 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
1953 {
1954 struct bridge_softc *sc;
1955
1956 sc = ifp->if_bridge;
1957
1958 /*
1959 * The packet didn't originate from a member interface. This should only
1960 * ever happen if a member interface is removed while packets are
1961 * queued for it.
1962 */
1963 if (sc == NULL) {
1964 m_freem(m);
1965 return;
1966 }
1967
1968 if (PFIL_HOOKED(&V_inet_pfil_hook)
1969 #ifdef INET6
1970 || PFIL_HOOKED(&V_inet6_pfil_hook)
1971 #endif
1972 ) {
1973 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
1974 return;
1975 if (m == NULL)
1976 return;
1977 }
1978
1979 bridge_enqueue(sc, ifp, m);
1980 }
1981
1982 /*
1983 * bridge_output:
1984 *
1985 * Send output from a bridge member interface. This
1986 * performs the bridging function for locally originated
1987 * packets.
1988 *
1989 * The mbuf has the Ethernet header already attached. We must
1990 * enqueue or free the mbuf before returning.
1991 */
1992 static int
1993 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1994 struct rtentry *rt)
1995 {
1996 struct ether_header *eh;
1997 struct ifnet *dst_if;
1998 struct bridge_softc *sc;
1999 uint16_t vlan;
2000
2001 if (m->m_len < ETHER_HDR_LEN) {
2002 m = m_pullup(m, ETHER_HDR_LEN);
2003 if (m == NULL)
2004 return (0);
2005 }
2006
2007 eh = mtod(m, struct ether_header *);
2008 sc = ifp->if_bridge;
2009 vlan = VLANTAGOF(m);
2010
2011 BRIDGE_LOCK(sc);
2012
2013 /*
2014 * If bridge is down, but the original output interface is up,
2015 * go ahead and send out that interface. Otherwise, the packet
2016 * is dropped below.
2017 */
2018 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2019 dst_if = ifp;
2020 goto sendunicast;
2021 }
2022
2023 /*
2024 * If the packet is a multicast, or we don't know a better way to
2025 * get there, send to all interfaces.
2026 */
2027 if (ETHER_IS_MULTICAST(eh->ether_dhost))
2028 dst_if = NULL;
2029 else
2030 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2031 if (dst_if == NULL) {
2032 struct bridge_iflist *bif;
2033 struct mbuf *mc;
2034 int error = 0, used = 0;
2035
2036 bridge_span(sc, m);
2037
2038 BRIDGE_LOCK2REF(sc, error);
2039 if (error) {
2040 m_freem(m);
2041 return (0);
2042 }
2043
2044 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2045 dst_if = bif->bif_ifp;
2046
2047 if (dst_if->if_type == IFT_GIF)
2048 continue;
2049 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2050 continue;
2051
2052 /*
2053 * If this is not the original output interface,
2054 * and the interface is participating in spanning
2055 * tree, make sure the port is in a state that
2056 * allows forwarding.
2057 */
2058 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2059 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2060 continue;
2061
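/*
 * The last member consumes the original mbuf; every other member gets
 * its own copy, which saves one m_copypacket() per broadcast.  If no
 * member consumed the original, it is freed after the loop.
 */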
2062 if (LIST_NEXT(bif, bif_next) == NULL) {
2063 used = 1;
2064 mc = m;
2065 } else {
2066 mc = m_copypacket(m, M_NOWAIT);
2067 if (mc == NULL) {
2068 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2069 continue;
2070 }
2071 }
2072
2073 bridge_enqueue(sc, dst_if, mc);
2074 }
2075 if (used == 0)
2076 m_freem(m);
2077 BRIDGE_UNREF(sc);
2078 return (0);
2079 }
2080
2081 sendunicast:
2082 /*
2083 * XXX Spanning tree consideration here?
2084 */
2085
2086 bridge_span(sc, m);
2087 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2088 m_freem(m);
2089 BRIDGE_UNLOCK(sc);
2090 return (0);
2091 }
2092
2093 BRIDGE_UNLOCK(sc);
2094 bridge_enqueue(sc, dst_if, m);
2095 return (0);
2096 }
2097
2098 /*
2099 * bridge_transmit:
2100 *
2101 * Do output on a bridge.
2102 *
2103 */
2104 static int
2105 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2106 {
2107 struct bridge_softc *sc;
2108 struct ether_header *eh;
2109 struct ifnet *dst_if;
2110 int error = 0;
2111
2112 sc = ifp->if_softc;
2113
2114 ETHER_BPF_MTAP(ifp, m);
2115
2116 eh = mtod(m, struct ether_header *);
2117
2118 BRIDGE_LOCK(sc);
2119 if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2120 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2121 BRIDGE_UNLOCK(sc);
2122 error = bridge_enqueue(sc, dst_if, m);
2123 } else
2124 bridge_broadcast(sc, ifp, m, 0);
2125
2126 return (error);
2127 }
2128
2129 /*
2130 * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2131 */
2132 static void
2133 bridge_qflush(struct ifnet *ifp __unused)
2134 {
2135 }
2136
2137 /*
2138 * bridge_forward:
2139 *
2140 * The forwarding function of the bridge.
2141 *
2142 * NOTE: Releases the lock on return.
2143 */
2144 static void
2145 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2146 struct mbuf *m)
2147 {
2148 struct bridge_iflist *dbif;
2149 struct ifnet *src_if, *dst_if, *ifp;
2150 struct ether_header *eh;
2151 uint16_t vlan;
2152 uint8_t *dst;
2153 int error;
2154
2155 src_if = m->m_pkthdr.rcvif;
2156 ifp = sc->sc_ifp;
2157
2158 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2159 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2160 vlan = VLANTAGOF(m);
2161
2162 if ((sbif->bif_flags & IFBIF_STP) &&
2163 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2164 goto drop;
2165
2166 eh = mtod(m, struct ether_header *);
2167 dst = eh->ether_dhost;
2168
2169 /* If the interface is learning, record the address. */
2170 if (sbif->bif_flags & IFBIF_LEARNING) {
2171 error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2172 sbif, 0, IFBAF_DYNAMIC);
2173 /*
2174 * If the interface has an address limit then deny any source
2175 * that is not in the cache.
2176 */
2177 if (error && sbif->bif_addrmax)
2178 goto drop;
2179 }
2180
2181 if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2182 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2183 goto drop;
2184
2185 /*
2186 * At this point, the port either doesn't participate
2187 * in spanning tree or it is in the forwarding state.
2188 */
2189
2190 /*
2191 * If the packet is unicast, destined for someone on
2192 * "this" side of the bridge, drop it.
2193 */
2194 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2195 dst_if = bridge_rtlookup(sc, dst, vlan);
2196 if (src_if == dst_if)
2197 goto drop;
2198 } else {
2199 /*
2200 * Check if it's a reserved multicast address; any address
2201 * listed in 802.1D section 7.12.6 may not be forwarded by the
2202 * bridge.
2203 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2204 */
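/*
 * This range covers, for example, the STP/RSTP bridge group address
 * (..:00), MAC PAUSE frames (..:01) and the Slow Protocols address
 * used by LACP (..:02).
 */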
2205 if (dst[0] == 0x01 && dst[1] == 0x80 &&
2206 dst[2] == 0xc2 && dst[3] == 0x00 &&
2207 dst[4] == 0x00 && dst[5] <= 0x0f)
2208 goto drop;
2209
2210 /* ...forward it to all interfaces. */
2211 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2212 dst_if = NULL;
2213 }
2214
2215 /*
2216 * If we have a destination interface which is a member of our bridge,
2217 * OR this is a unicast packet, push it through the bpf(4) machinery.
2218 * For broadcast or multicast packets, don't bother because it will
2219 * be reinjected into ether_input. We do this before we pass the packets
2220 * through the pfil(9) framework, as it is possible that pfil(9) will
2221 * drop the packet, or possibly modify it, making it difficult to debug
2222 * firewall issues on the bridge.
2223 */
2224 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2225 ETHER_BPF_MTAP(ifp, m);
2226
2227 /* run the packet filter */
2228 if (PFIL_HOOKED(&V_inet_pfil_hook)
2229 #ifdef INET6
2230 || PFIL_HOOKED(&V_inet6_pfil_hook)
2231 #endif
2232 ) {
2233 BRIDGE_UNLOCK(sc);
2234 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2235 return;
2236 if (m == NULL)
2237 return;
2238 BRIDGE_LOCK(sc);
2239 }
2240
2241 if (dst_if == NULL) {
2242 bridge_broadcast(sc, src_if, m, 1);
2243 return;
2244 }
2245
2246 /*
2247 * At this point, we're dealing with a unicast frame
2248 * going to a different interface.
2249 */
2250 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2251 goto drop;
2252
2253 dbif = bridge_lookup_member_if(sc, dst_if);
2254 if (dbif == NULL)
2255 /* Not a member of the bridge (anymore?) */
2256 goto drop;
2257
2258 /* Private segments cannot talk to each other */
2259 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2260 goto drop;
2261
2262 if ((dbif->bif_flags & IFBIF_STP) &&
2263 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2264 goto drop;
2265
2266 BRIDGE_UNLOCK(sc);
2267
2268 if (PFIL_HOOKED(&V_inet_pfil_hook)
2269 #ifdef INET6
2270 || PFIL_HOOKED(&V_inet6_pfil_hook)
2271 #endif
2272 ) {
2273 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2274 return;
2275 if (m == NULL)
2276 return;
2277 }
2278
2279 bridge_enqueue(sc, dst_if, m);
2280 return;
2281
2282 drop:
2283 BRIDGE_UNLOCK(sc);
2284 m_freem(m);
2285 }
2286
2287 /*
2288 * bridge_input:
2289 *
2290 * Receive input from a member interface. Queue the packet for
2291 * bridging if it is not for us.
2292 */
2293 static struct mbuf *
2294 bridge_input(struct ifnet *ifp, struct mbuf *m)
2295 {
2296 struct bridge_softc *sc = ifp->if_bridge;
2297 struct bridge_iflist *bif, *bif2;
2298 struct ifnet *bifp;
2299 struct ether_header *eh;
2300 struct mbuf *mc, *mc2;
2301 uint16_t vlan;
2302 int error;
2303
2304 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2305 return (m);
2306
2307 bifp = sc->sc_ifp;
2308 vlan = VLANTAGOF(m);
2309
2310 /*
2311 * Implement support for bridge monitoring. If this flag has been
2312 * set on this interface, discard the packet once we push it through
2313 * the bpf(4) machinery, but before we do, increment the byte and
2314 * packet counters associated with this interface.
2315 */
2316 if ((bifp->if_flags & IFF_MONITOR) != 0) {
2317 m->m_pkthdr.rcvif = bifp;
2318 ETHER_BPF_MTAP(bifp, m);
2319 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2320 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2321 m_freem(m);
2322 return (NULL);
2323 }
2324 BRIDGE_LOCK(sc);
2325 bif = bridge_lookup_member_if(sc, ifp);
2326 if (bif == NULL) {
2327 BRIDGE_UNLOCK(sc);
2328 return (m);
2329 }
2330
2331 eh = mtod(m, struct ether_header *);
2332
2333 bridge_span(sc, m);
2334
2335 if (m->m_flags & (M_BCAST|M_MCAST)) {
2336 /* Tap off 802.1D packets; they do not get forwarded. */
2337 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2338 ETHER_ADDR_LEN) == 0) {
2339 bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2340 BRIDGE_UNLOCK(sc);
2341 return (NULL);
2342 }
2343
2344 if ((bif->bif_flags & IFBIF_STP) &&
2345 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2346 BRIDGE_UNLOCK(sc);
2347 return (m);
2348 }
2349
2350 /*
2351 * Make a deep copy of the packet and enqueue the copy
2352 * for bridge processing; return the original packet for
2353 * local processing.
2354 */
2355 mc = m_dup(m, M_NOWAIT);
2356 if (mc == NULL) {
2357 BRIDGE_UNLOCK(sc);
2358 return (m);
2359 }
2360
2361 /* Perform the bridge forwarding function with the copy. */
2362 bridge_forward(sc, bif, mc);
2363
2364 /*
2365 * Reinject the mbuf as arriving on the bridge so we have a
2366 * chance at claiming multicast packets. We cannot loop back
2367 * here from ether_input as a bridge is never a member of a
2368 * bridge.
2369 */
2370 KASSERT(bifp->if_bridge == NULL,
2371 ("loop created in bridge_input"));
2372 mc2 = m_dup(m, M_NOWAIT);
2373 if (mc2 != NULL) {
2374 /* Keep the layer3 header aligned */
2375 int i = min(mc2->m_pkthdr.len, max_protohdr);
2376 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2377 }
2378 if (mc2 != NULL) {
2379 mc2->m_pkthdr.rcvif = bifp;
2380 (*bifp->if_input)(bifp, mc2);
2381 }
2382
2383 /* Return the original packet for local processing. */
2384 return (m);
2385 }
2386
2387 if ((bif->bif_flags & IFBIF_STP) &&
2388 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2389 BRIDGE_UNLOCK(sc);
2390 return (m);
2391 }
2392
2393 #if (defined(INET) || defined(INET6))
2394 # define OR_CARP_CHECK_WE_ARE_DST(iface) \
2395 || ((iface)->if_carp \
2396 && (*carp_forus_p)((iface), eh->ether_dhost))
2397 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2398 || ((iface)->if_carp \
2399 && (*carp_forus_p)((iface), eh->ether_shost))
2400 #else
2401 # define OR_CARP_CHECK_WE_ARE_DST(iface)
2402 # define OR_CARP_CHECK_WE_ARE_SRC(iface)
2403 #endif
2404
2405 #ifdef INET6
2406 # define OR_PFIL_HOOKED_INET6 \
2407 || PFIL_HOOKED(&V_inet6_pfil_hook)
2408 #else
2409 # define OR_PFIL_HOOKED_INET6
2410 #endif
2411
2412 #define GRAB_OUR_PACKETS(iface) \
2413 if ((iface)->if_type == IFT_GIF) \
2414 continue; \
2415 /* It is destined for us. */ \
2416 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
2417 OR_CARP_CHECK_WE_ARE_DST((iface)) \
2418 ) { \
2419 if ((iface)->if_type == IFT_BRIDGE) { \
2420 ETHER_BPF_MTAP(iface, m); \
2421 if_inc_counter(iface, IFCOUNTER_IPACKETS, 1); \
2422 if_inc_counter(iface, IFCOUNTER_IBYTES, m->m_pkthdr.len); \
2423 /* Filter on the physical interface. */ \
2424 if (V_pfil_local_phys && \
2425 (PFIL_HOOKED(&V_inet_pfil_hook) \
2426 OR_PFIL_HOOKED_INET6)) { \
2427 if (bridge_pfil(&m, NULL, ifp, \
2428 PFIL_IN) != 0 || m == NULL) { \
2429 BRIDGE_UNLOCK(sc); \
2430 return (NULL); \
2431 } \
2432 eh = mtod(m, struct ether_header *); \
2433 } \
2434 } \
2435 if (bif->bif_flags & IFBIF_LEARNING) { \
2436 error = bridge_rtupdate(sc, eh->ether_shost, \
2437 vlan, bif, 0, IFBAF_DYNAMIC); \
2438 if (error && bif->bif_addrmax) { \
2439 BRIDGE_UNLOCK(sc); \
2440 m_freem(m); \
2441 return (NULL); \
2442 } \
2443 } \
2444 m->m_pkthdr.rcvif = iface; \
2445 BRIDGE_UNLOCK(sc); \
2446 return (m); \
2447 } \
2448 \
2449 /* We just received a packet that we sent out. */ \
2450 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2451 OR_CARP_CHECK_WE_ARE_SRC((iface)) \
2452 ) { \
2453 BRIDGE_UNLOCK(sc); \
2454 m_freem(m); \
2455 return (NULL); \
2456 }
2457
2458 /*
2459 * Unicast. Make sure it's not for the bridge.
2460 */
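/*
 * GRAB_OUR_PACKETS() expands to several statements and may return from
 * this function, so the standalone uses below are wrapped in
 * do { ... } while (0) to keep each one a single C statement.
 */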
2461 do { GRAB_OUR_PACKETS(bifp) } while (0);
2462
2463 /*
2464 * Give ifp a chance at first priority.  This helps when the packet
2465 * arrives through an interface, such as a vlan(4) interface, that
2466 * shares its MAC address with several interfaces on the same bridge.
2467 * It also saves some CPU cycles when the destination interface and
2468 * the input interface (i.e. ifp) are the same.
2469 */
2470 do { GRAB_OUR_PACKETS(ifp) } while (0);
2471
2472 /* Now check all the bridge members. */
2473 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2474 GRAB_OUR_PACKETS(bif2->bif_ifp)
2475 }
2476
2477 #undef OR_CARP_CHECK_WE_ARE_DST
2478 #undef OR_CARP_CHECK_WE_ARE_SRC
2479 #undef OR_PFIL_HOOKED_INET6
2480 #undef GRAB_OUR_PACKETS
2481
2482 /* Perform the bridge forwarding function. */
2483 bridge_forward(sc, bif, m);
2484
2485 return (NULL);
2486 }
2487
2488 /*
2489 * bridge_broadcast:
2490 *
2491 * Send a frame to all interfaces that are members of
2492 * the bridge, except for the one on which the packet
2493 * arrived.
2494 *
2495 * NOTE: Releases the lock on return.
2496 */
2497 static void
2498 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2499 struct mbuf *m, int runfilt)
2500 {
2501 struct bridge_iflist *dbif, *sbif;
2502 struct mbuf *mc;
2503 struct ifnet *dst_if;
2504 int error = 0, used = 0, i;
2505
2506 sbif = bridge_lookup_member_if(sc, src_if);
2507
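/*
 * Trade the bridge lock for a reference (released by BRIDGE_UNREF()
 * below) so that we can call the member interfaces' transmit routines
 * without holding the bridge mutex.
 */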
2508 BRIDGE_LOCK2REF(sc, error);
2509 if (error) {
2510 m_freem(m);
2511 return;
2512 }
2513
2514 /* Filter on the bridge interface before broadcasting */
2515 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook)
2516 #ifdef INET6
2517 || PFIL_HOOKED(&V_inet6_pfil_hook)
2518 #endif
2519 )) {
2520 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2521 goto out;
2522 if (m == NULL)
2523 goto out;
2524 }
2525
2526 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2527 dst_if = dbif->bif_ifp;
2528 if (dst_if == src_if)
2529 continue;
2530
2531 /* Private segments cannot talk to each other */
2532 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2533 continue;
2534
2535 if ((dbif->bif_flags & IFBIF_STP) &&
2536 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2537 continue;
2538
2539 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2540 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2541 continue;
2542
2543 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2544 continue;
2545
2546 if (LIST_NEXT(dbif, bif_next) == NULL) {
2547 mc = m;
2548 used = 1;
2549 } else {
2550 mc = m_dup(m, M_NOWAIT);
2551 if (mc == NULL) {
2552 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2553 continue;
2554 }
2555 }
2556
2557 /*
2558 * Filter on the output interface. Pass a NULL bridge interface
2559 * pointer so we do not redundantly filter on the bridge for
2560 * each interface we broadcast on.
2561 */
2562 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook)
2563 #ifdef INET6
2564 || PFIL_HOOKED(&V_inet6_pfil_hook)
2565 #endif
2566 )) {
2567 if (used == 0) {
2568 /* Keep the layer3 header aligned */
2569 i = min(mc->m_pkthdr.len, max_protohdr);
2570 mc = m_copyup(mc, i, ETHER_ALIGN);
2571 if (mc == NULL) {
2572 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2573 continue;
2574 }
2575 }
2576 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2577 continue;
2578 if (mc == NULL)
2579 continue;
2580 }
2581
2582 bridge_enqueue(sc, dst_if, mc);
2583 }
2584 if (used == 0)
2585 m_freem(m);
2586
2587 out:
2588 BRIDGE_UNREF(sc);
2589 }
2590
2591 /*
2592 * bridge_span:
2593 *
2594 * Duplicate a packet out one or more interfaces that are in span mode;
2595 * the original mbuf is unmodified.
2596 */
2597 static void
2598 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2599 {
2600 struct bridge_iflist *bif;
2601 struct ifnet *dst_if;
2602 struct mbuf *mc;
2603
2604 if (LIST_EMPTY(&sc->sc_spanlist))
2605 return;
2606
2607 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2608 dst_if = bif->bif_ifp;
2609
2610 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2611 continue;
2612
2613 mc = m_copypacket(m, M_NOWAIT);
2614 if (mc == NULL) {
2615 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2616 continue;
2617 }
2618
2619 bridge_enqueue(sc, dst_if, mc);
2620 }
2621 }
2622
2623 /*
2624 * bridge_rtupdate:
2625 *
2626 * Add a bridge routing entry.
2627 */
2628 static int
2629 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2630 struct bridge_iflist *bif, int setflags, uint8_t flags)
2631 {
2632 struct bridge_rtnode *brt;
2633 int error;
2634
2635 BRIDGE_LOCK_ASSERT(sc);
2636
2637 /* Check that the source address is valid and not multicast. */
2638 if (ETHER_IS_MULTICAST(dst) ||
2639 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2640 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2641 return (EINVAL);
2642
2643 /* 802.1p frames map to vlan 1 */
2644 if (vlan == 0)
2645 vlan = 1;
2646
2647 /*
2648 * A route for this destination might already exist. If so,
2649 * update it, otherwise create a new one.
2650 */
2651 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2652 if (sc->sc_brtcnt >= sc->sc_brtmax) {
2653 sc->sc_brtexceeded++;
2654 return (ENOSPC);
2655 }
2656 /* Check per interface address limits (if enabled) */
2657 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2658 bif->bif_addrexceeded++;
2659 return (ENOSPC);
2660 }
2661
2662 /*
2663 * Allocate a new bridge forwarding node, and
2664 * initialize the expiration time and Ethernet
2665 * address.
2666 */
2667 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2668 if (brt == NULL)
2669 return (ENOMEM);
2670
2671 if (bif->bif_flags & IFBIF_STICKY)
2672 brt->brt_flags = IFBAF_STICKY;
2673 else
2674 brt->brt_flags = IFBAF_DYNAMIC;
2675
2676 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2677 brt->brt_vlan = vlan;
2678
2679 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2680 uma_zfree(bridge_rtnode_zone, brt);
2681 return (error);
2682 }
2683 brt->brt_dst = bif;
2684 bif->bif_addrcnt++;
2685 }
2686
2687 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2688 brt->brt_dst != bif) {
2689 brt->brt_dst->bif_addrcnt--;
2690 brt->brt_dst = bif;
2691 brt->brt_dst->bif_addrcnt++;
2692 }
2693
2694 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2695 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2696 if (setflags)
2697 brt->brt_flags = flags;
2698
2699 return (0);
2700 }
2701
2702 /*
2703 * bridge_rtlookup:
2704 *
2705 * Lookup the destination interface for an address.
2706 */
2707 static struct ifnet *
2708 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2709 {
2710 struct bridge_rtnode *brt;
2711
2712 BRIDGE_LOCK_ASSERT(sc);
2713
2714 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2715 return (NULL);
2716
2717 return (brt->brt_ifp);
2718 }
2719
2720 /*
2721 * bridge_rttrim:
2722 *
2723 * Trim the routing table so that we have a number
2724 * of routing entries less than or equal to the
2725 * maximum number.
2726 */
2727 static void
2728 bridge_rttrim(struct bridge_softc *sc)
2729 {
2730 struct bridge_rtnode *brt, *nbrt;
2731
2732 BRIDGE_LOCK_ASSERT(sc);
2733
2734 /* Make sure we actually need to do this. */
2735 if (sc->sc_brtcnt <= sc->sc_brtmax)
2736 return;
2737
2738 /* Force an aging cycle; this might trim enough addresses. */
2739 bridge_rtage(sc);
2740 if (sc->sc_brtcnt <= sc->sc_brtmax)
2741 return;
2742
2743 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2744 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2745 bridge_rtnode_destroy(sc, brt);
2746 if (sc->sc_brtcnt <= sc->sc_brtmax)
2747 return;
2748 }
2749 }
2750 }
2751
2752 /*
2753 * bridge_timer:
2754 *
2755 * Aging timer for the bridge.
2756 */
2757 static void
2758 bridge_timer(void *arg)
2759 {
2760 struct bridge_softc *sc = arg;
2761
2762 BRIDGE_LOCK_ASSERT(sc);
2763
2764 bridge_rtage(sc);
2765
2766 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2767 callout_reset(&sc->sc_brcallout,
2768 bridge_rtable_prune_period * hz, bridge_timer, sc);
2769 }
2770
2771 /*
2772 * bridge_rtage:
2773 *
2774 * Perform an aging cycle.
2775 */
2776 static void
2777 bridge_rtage(struct bridge_softc *sc)
2778 {
2779 struct bridge_rtnode *brt, *nbrt;
2780
2781 BRIDGE_LOCK_ASSERT(sc);
2782
2783 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2784 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2785 if (time_uptime >= brt->brt_expire)
2786 bridge_rtnode_destroy(sc, brt);
2787 }
2788 }
2789 }
2790
2791 /*
2792 * bridge_rtflush:
2793 *
2794 * Remove all dynamic addresses from the bridge.
2795 */
2796 static void
2797 bridge_rtflush(struct bridge_softc *sc, int full)
2798 {
2799 struct bridge_rtnode *brt, *nbrt;
2800
2801 BRIDGE_LOCK_ASSERT(sc);
2802
2803 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2804 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2805 bridge_rtnode_destroy(sc, brt);
2806 }
2807 }
2808
2809 /*
2810 * bridge_rtdaddr:
2811 *
2812 * Remove an address from the table.
2813 */
2814 static int
2815 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2816 {
2817 struct bridge_rtnode *brt;
2818 int found = 0;
2819
2820 BRIDGE_LOCK_ASSERT(sc);
2821
2822 /*
2823 * If vlan is zero then we want to delete for all vlans so the lookup
2824 * may return more than one.
2825 */
2826 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2827 bridge_rtnode_destroy(sc, brt);
2828 found = 1;
2829 }
2830
2831 return (found ? 0 : ENOENT);
2832 }
2833
2834 /*
2835 * bridge_rtdelete:
2836 *
2837 * Delete routes to a specific member interface.
2838 */
2839 static void
2840 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2841 {
2842 struct bridge_rtnode *brt, *nbrt;
2843
2844 BRIDGE_LOCK_ASSERT(sc);
2845
2846 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2847 if (brt->brt_ifp == ifp && (full ||
2848 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2849 bridge_rtnode_destroy(sc, brt);
2850 }
2851 }
2852
2853 /*
2854 * bridge_rtable_init:
2855 *
2856 * Initialize the route table for this bridge.
2857 */
2858 static void
2859 bridge_rtable_init(struct bridge_softc *sc)
2860 {
2861 int i;
2862
2863 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2864 M_DEVBUF, M_WAITOK);
2865
2866 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2867 LIST_INIT(&sc->sc_rthash[i]);
2868
2869 sc->sc_rthash_key = arc4random();
2870 LIST_INIT(&sc->sc_rtlist);
2871 }
2872
2873 /*
2874 * bridge_rtable_fini:
2875 *
2876 * Deconstruct the route table for this bridge.
2877 */
2878 static void
2879 bridge_rtable_fini(struct bridge_softc *sc)
2880 {
2881
2882 KASSERT(sc->sc_brtcnt == 0,
2883 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2884 free(sc->sc_rthash, M_DEVBUF);
2885 }
2886
2887 /*
2888 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2889 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2890 */
2891 #define mix(a, b, c) \
2892 do { \
2893 a -= b; a -= c; a ^= (c >> 13); \
2894 b -= c; b -= a; b ^= (a << 8); \
2895 c -= a; c -= b; c ^= (b >> 13); \
2896 a -= b; a -= c; a ^= (c >> 12); \
2897 b -= c; b -= a; b ^= (a << 16); \
2898 c -= a; c -= b; c ^= (b >> 5); \
2899 a -= b; a -= c; a ^= (c >> 3); \
2900 b -= c; b -= a; b ^= (a << 10); \
2901 c -= a; c -= b; c ^= (b >> 15); \
2902 } while (/*CONSTCOND*/0)
2903
2904 static __inline uint32_t
2905 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2906 {
2907 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2908
2909 b += addr[5] << 8;
2910 b += addr[4];
2911 a += addr[3] << 24;
2912 a += addr[2] << 16;
2913 a += addr[1] << 8;
2914 a += addr[0];
2915
2916 mix(a, b, c);
2917
2918 return (c & BRIDGE_RTHASH_MASK);
2919 }
2920
2921 #undef mix
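
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a minimal userland harness showing how the Jenkins-style mix above maps
 * a 48-bit Ethernet address into a hash bucket.  The table size and the
 * fixed key below are assumptions chosen for the example; the kernel sizes
 * the table with BRIDGE_RTHASH_SIZE and seeds sc_rthash_key with
 * arc4random() in bridge_rtable_init().  It is kept under "#if 0" so it is
 * never compiled into the kernel.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_RTHASH_SIZE	1024			/* assumed power-of-two size */
#define EX_RTHASH_MASK	(EX_RTHASH_SIZE - 1)

#define ex_mix(a, b, c)							\
do {									\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)

static uint32_t
ex_rthash(const uint8_t *addr, uint32_t key)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	/* Fold the six address bytes into a and b, as bridge_rthash() does. */
	b += (uint32_t)addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24;
	a += (uint32_t)addr[2] << 16;
	a += (uint32_t)addr[1] << 8;
	a += addr[0];

	ex_mix(a, b, c);

	/* The low bits of c select one of the hash buckets. */
	return (c & EX_RTHASH_MASK);
}

int
main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bucket %u of %u\n",
	    (unsigned)ex_rthash(mac, 0xdeadbeef), (unsigned)EX_RTHASH_SIZE);
	return (0);
}
#endif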
2922
2923 static int
2924 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2925 {
2926 int i, d;
2927
2928 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2929 d = ((int)a[i]) - ((int)b[i]);
2930 }
2931
2932 return (d);
2933 }
2934
2935 /*
2936 * bridge_rtnode_lookup:
2937 *
2938 * Look up a bridge route node for the specified destination.  Compare the
2939 * vlan id, or if it is zero, return the first match.
2940 */
2941 static struct bridge_rtnode *
2942 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2943 {
2944 struct bridge_rtnode *brt;
2945 uint32_t hash;
2946 int dir;
2947
2948 BRIDGE_LOCK_ASSERT(sc);
2949
2950 hash = bridge_rthash(sc, addr);
2951 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2952 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
2953 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
2954 return (brt);
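/*
 * Hash chains are kept sorted by address (see bridge_rtnode_insert()),
 * so once we pass the slot where the address would live we can stop
 * looking.
 */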
2955 if (dir > 0)
2956 return (NULL);
2957 }
2958
2959 return (NULL);
2960 }
2961
2962 /*
2963 * bridge_rtnode_insert:
2964 *
2965 * Insert the specified bridge node into the route table. We
2966 * assume the entry is not already in the table.
2967 */
2968 static int
2969 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2970 {
2971 struct bridge_rtnode *lbrt;
2972 uint32_t hash;
2973 int dir;
2974
2975 BRIDGE_LOCK_ASSERT(sc);
2976
2977 hash = bridge_rthash(sc, brt->brt_addr);
2978
2979 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2980 if (lbrt == NULL) {
2981 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2982 goto out;
2983 }
2984
2985 do {
2986 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
2987 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
2988 return (EEXIST);
2989 if (dir > 0) {
2990 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2991 goto out;
2992 }
2993 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2994 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2995 goto out;
2996 }
2997 lbrt = LIST_NEXT(lbrt, brt_hash);
2998 } while (lbrt != NULL);
2999
3000 #ifdef DIAGNOSTIC
3001 panic("bridge_rtnode_insert: impossible");
3002 #endif
3003
3004 out:
3005 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3006 sc->sc_brtcnt++;
3007
3008 return (0);
3009 }
3010
3011 /*
3012 * bridge_rtnode_destroy:
3013 *
3014 * Destroy a bridge rtnode.
3015 */
3016 static void
3017 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3018 {
3019 BRIDGE_LOCK_ASSERT(sc);
3020
3021 LIST_REMOVE(brt, brt_hash);
3022
3023 LIST_REMOVE(brt, brt_list);
3024 sc->sc_brtcnt--;
3025 brt->brt_dst->bif_addrcnt--;
3026 uma_zfree(bridge_rtnode_zone, brt);
3027 }
3028
3029 /*
3030 * bridge_rtable_expire:
3031 *
3032 * Set the expiry time for all routes on an interface.
3033 */
3034 static void
3035 bridge_rtable_expire(struct ifnet *ifp, int age)
3036 {
3037 struct bridge_softc *sc = ifp->if_bridge;
3038 struct bridge_rtnode *brt;
3039
3040 BRIDGE_LOCK(sc);
3041
3042 /*
3043 * If the age is zero then flush, otherwise set all the expiry times to
3044 * age for the interface.
3045 */
3046 if (age == 0)
3047 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3048 else {
3049 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3050 /* Cap the expiry time to 'age' */
3051 if (brt->brt_ifp == ifp &&
3052 brt->brt_expire > time_uptime + age &&
3053 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3054 brt->brt_expire = time_uptime + age;
3055 }
3056 }
3057 BRIDGE_UNLOCK(sc);
3058 }
3059
3060 /*
3061 * bridge_state_change:
3062 *
3063 * Callback from the bridgestp code when a port changes states.
3064 */
3065 static void
3066 bridge_state_change(struct ifnet *ifp, int state)
3067 {
3068 struct bridge_softc *sc = ifp->if_bridge;
3069 static const char *stpstates[] = {
3070 "disabled",
3071 "listening",
3072 "learning",
3073 "forwarding",
3074 "blocking",
3075 "discarding"
3076 };
3077
3078 CURVNET_SET(ifp->if_vnet);
3079 if (V_log_stp)
3080 log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3081 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3082 CURVNET_RESTORE();
3083 }
3084
3085 /*
3086 * Send bridge packets through pfil if they are one of the types pfil can deal
3087 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
3088 * question.)  If bifp or ifp is NULL then packet filtering is skipped for
3089 * that interface.
3090 */
3091 static int
3092 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3093 {
3094 int snap, error, i, hlen;
3095 struct ether_header *eh1, eh2;
3096 struct ip *ip;
3097 struct llc llc1;
3098 u_int16_t ether_type;
3099
3100 snap = 0;
3101 error = -1; /* Default to an error unless explicitly set to 0 below */
3102
3103 #if 0
3104 /* we may return with the IP fields swapped; ensure it's not shared */
3105 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3106 #endif
3107
3108 if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3109 return (0); /* filtering is disabled */
3110
3111 i = min((*mp)->m_pkthdr.len, max_protohdr);
3112 if ((*mp)->m_len < i) {
3113 *mp = m_pullup(*mp, i);
3114 if (*mp == NULL) {
3115 printf("%s: m_pullup failed\n", __func__);
3116 return (-1);
3117 }
3118 }
3119
3120 eh1 = mtod(*mp, struct ether_header *);
3121 ether_type = ntohs(eh1->ether_type);
3122
3123 /*
3124 * Check for SNAP/LLC.
3125 */
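/*
 * An ether_type below ETHERMTU is really an 802.3 length field, so an
 * LLC header (possibly followed by a SNAP header carrying the real
 * Ethertype) follows the Ethernet header.
 */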
3126 if (ether_type < ETHERMTU) {
3127 struct llc *llc2 = (struct llc *)(eh1 + 1);
3128
3129 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3130 llc2->llc_dsap == LLC_SNAP_LSAP &&
3131 llc2->llc_ssap == LLC_SNAP_LSAP &&
3132 llc2->llc_control == LLC_UI) {
3133 ether_type = htons(llc2->llc_un.type_snap.ether_type);
3134 snap = 1;
3135 }
3136 }
3137
3138 /*
3139 * If we're trying to filter bridge traffic, don't look at anything
3140 * other than IP and ARP traffic. If the filter doesn't understand
3141 * IPv6, don't allow IPv6 through the bridge either. This is lame
3142 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3143 * but of course we don't have an AppleTalk filter to begin with.
3144 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3145 * ARP traffic.)
3146 */
3147 switch (ether_type) {
3148 case ETHERTYPE_ARP:
3149 case ETHERTYPE_REVARP:
3150 if (V_pfil_ipfw_arp == 0)
3151 return (0); /* Automatically pass */
3152 break;
3153
3154 case ETHERTYPE_IP:
3155 #ifdef INET6
3156 case ETHERTYPE_IPV6:
3157 #endif /* INET6 */
3158 break;
3159 default:
3160 /*
3161 * Check to see if the user wants to pass non-IP
3162 * packets; these will not be checked by pfil(9) and
3163 * would be passed unconditionally, so the default is to drop.
3164 */
3165 if (V_pfil_onlyip)
3166 goto bad;
3167 }
3168
3169 /* Run the packet through pfil before stripping link headers */
3170 if (PFIL_HOOKED(&V_link_pfil_hook) && V_pfil_ipfw != 0 &&
3171 dir == PFIL_OUT && ifp != NULL) {
3172
3173 error = pfil_run_hooks(&V_link_pfil_hook, mp, ifp, dir, 0,
3174 NULL);
3175
3176 if (*mp == NULL || error != 0) /* packet consumed by filter */
3177 return (error);
3178 }
3179
3180 /* Strip off the Ethernet header and keep a copy. */
3181 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3182 m_adj(*mp, ETHER_HDR_LEN);
3183
3184 /* Strip off snap header, if present */
3185 if (snap) {
3186 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3187 m_adj(*mp, sizeof(struct llc));
3188 }
3189
3190 /*
3191 * Check the IP header for alignment and errors
3192 */
3193 if (dir == PFIL_IN) {
3194 switch (ether_type) {
3195 case ETHERTYPE_IP:
3196 error = bridge_ip_checkbasic(mp);
3197 break;
3198 #ifdef INET6
3199 case ETHERTYPE_IPV6:
3200 error = bridge_ip6_checkbasic(mp);
3201 break;
3202 #endif /* INET6 */
3203 default:
3204 error = 0;
3205 }
3206 if (error)
3207 goto bad;
3208 }
3209
3210 error = 0;
3211
3212 /*
3213 * Run the packet through pfil
3214 */
3215 switch (ether_type) {
3216 case ETHERTYPE_IP:
3217 /*
3218 * Run pfil on the member interface and the bridge, both can
3219 * be skipped by clearing pfil_member or pfil_bridge.
3220 *
3221 * Keep the order:
3222 * in_if -> bridge_if -> out_if
3223 */
3224 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3225 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp,
3226 dir, 0, NULL);
3227
3228 if (*mp == NULL || error != 0) /* filter may consume */
3229 break;
3230
3231 if (V_pfil_member && ifp != NULL)
3232 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp,
3233 dir, 0, NULL);
3234
3235 if (*mp == NULL || error != 0) /* filter may consume */
3236 break;
3237
3238 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL)
3239 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp,
3240 dir, 0, NULL);
3241
3242 if (*mp == NULL || error != 0) /* filter may consume */
3243 break;
3244
3245 /* Check if we need to fragment the packet; bridge_fragment() */
3246 /* generates an mbuf chain of packets that already include */
3247 /* the Ethernet headers. */
3248 if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3249 i = (*mp)->m_pkthdr.len;
3250 if (i > ifp->if_mtu) {
3251 error = bridge_fragment(ifp, mp, &eh2, snap,
3252 &llc1);
3253 return (error);
3254 }
3255 }
3256
3257 /* Recalculate the ip checksum. */
3258 ip = mtod(*mp, struct ip *);
3259 hlen = ip->ip_hl << 2;
3260 if (hlen < sizeof(struct ip))
3261 goto bad;
3262 if (hlen > (*mp)->m_len) {
3263 if ((*mp = m_pullup(*mp, hlen)) == NULL)
3264 goto bad;
3265 ip = mtod(*mp, struct ip *);
3266 if (ip == NULL)
3267 goto bad;
3268 }
3269 ip->ip_sum = 0;
3270 if (hlen == sizeof(struct ip))
3271 ip->ip_sum = in_cksum_hdr(ip);
3272 else
3273 ip->ip_sum = in_cksum(*mp, hlen);
3274
3275 break;
3276 #ifdef INET6
3277 case ETHERTYPE_IPV6:
3278 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3279 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp,
3280 dir, 0, NULL);
3281
3282 if (*mp == NULL || error != 0) /* filter may consume */
3283 break;
3284
3285 if (V_pfil_member && ifp != NULL)
3286 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp,
3287 dir, 0, NULL);
3288
3289 if (*mp == NULL || error != 0) /* filter may consume */
3290 break;
3291
3292 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL)
3293 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp,
3294 dir, 0, NULL);
3295 break;
3296 #endif
3297 default:
3298 error = 0;
3299 break;
3300 }
3301
3302 if (*mp == NULL)
3303 return (error);
3304 if (error != 0)
3305 goto bad;
3306
3307 error = -1;
3308
3309 /*
3310 * Finally, put everything back the way it was and return
3311 */
3312 if (snap) {
3313 M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3314 if (*mp == NULL)
3315 return (error);
3316 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3317 }
3318
3319 M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3320 if (*mp == NULL)
3321 return (error);
3322 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3323
3324 return (0);
3325
3326 bad:
3327 m_freem(*mp);
3328 *mp = NULL;
3329 return (error);
3330 }
3331
3332 /*
3333 * Perform basic checks on header size since
3334 * pfil assumes ip_input has already processed
3335 * it for it. Cut-and-pasted from ip_input.c.
3336 * Given how simple the IPv6 version is,
3337 * does the IPv4 version really need to be
3338 * this complicated?
3339 *
3340 * XXX Should we update ipstat here, or not?
3341 * XXX Right now we update ipstat but not
3342 * XXX csum_counter.
3343 */
3344 static int
3345 bridge_ip_checkbasic(struct mbuf **mp)
3346 {
3347 struct mbuf *m = *mp;
3348 struct ip *ip;
3349 int len, hlen;
3350 u_short sum;
3351
3352 if (*mp == NULL)
3353 return (-1);
3354
3355 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3356 if ((m = m_copyup(m, sizeof(struct ip),
3357 (max_linkhdr + 3) & ~3)) == NULL) {
3358 /* XXXJRT new stat, please */
3359 KMOD_IPSTAT_INC(ips_toosmall);
3360 goto bad;
3361 }
3362 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
3363 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3364 KMOD_IPSTAT_INC(ips_toosmall);
3365 goto bad;
3366 }
3367 }
3368 ip = mtod(m, struct ip *);
3369 if (ip == NULL) goto bad;
3370
3371 if (ip->ip_v != IPVERSION) {
3372 KMOD_IPSTAT_INC(ips_badvers);
3373 goto bad;
3374 }
3375 hlen = ip->ip_hl << 2;
3376 if (hlen < sizeof(struct ip)) { /* minimum header length */
3377 KMOD_IPSTAT_INC(ips_badhlen);
3378 goto bad;
3379 }
3380 if (hlen > m->m_len) {
3381 if ((m = m_pullup(m, hlen)) == NULL) {
3382 KMOD_IPSTAT_INC(ips_badhlen);
3383 goto bad;
3384 }
3385 ip = mtod(m, struct ip *);
3386 if (ip == NULL) goto bad;
3387 }
3388
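/*
 * If the interface has already verified the IP checksum, trust its
 * CSUM_IP_VALID result instead of recomputing the sum here.
 */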
3389 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3390 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3391 } else {
3392 if (hlen == sizeof(struct ip)) {
3393 sum = in_cksum_hdr(ip);
3394 } else {
3395 sum = in_cksum(m, hlen);
3396 }
3397 }
3398 if (sum) {
3399 KMOD_IPSTAT_INC(ips_badsum);
3400 goto bad;
3401 }
3402
3403 /* Retrieve the packet length. */
3404 len = ntohs(ip->ip_len);
3405
3406 /*
3407 * Check for additional length bogosity
3408 */
3409 if (len < hlen) {
3410 KMOD_IPSTAT_INC(ips_badlen);
3411 goto bad;
3412 }
3413
3414 /*
3415 * Check that the amount of data in the buffers
3416 * is at least as much as the IP header would have us expect.
3417 * Drop packet if shorter than we expect.
3418 */
3419 if (m->m_pkthdr.len < len) {
3420 KMOD_IPSTAT_INC(ips_tooshort);
3421 goto bad;
3422 }
3423
3424 /* Checks out, proceed */
3425 *mp = m;
3426 return (0);
3427
3428 bad:
3429 *mp = m;
3430 return (-1);
3431 }
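
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a standalone version of the RFC 1071 one's-complement sum that
 * in_cksum_hdr()/in_cksum() compute over the IP header above.  The function
 * name and the flat-buffer interface are assumptions for the example; the
 * kernel routines are machine-optimized and operate on mbuf chains.  A
 * header whose checksum field is correct sums to 0xffff, so this returns 0
 * for it.  Kept under "#if 0" so it is never compiled into the kernel.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t
ex_ip_cksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	/* Sum the buffer as big-endian 16-bit words. */
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len == 1)				/* pad a trailing odd byte */
		sum += (uint32_t)p[0] << 8;

	/* Fold the carries back in and take the one's complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	/* Store the 16-bit result back into the header in network order. */
	return ((uint16_t)~sum);
}
#endif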
3432
3433 #ifdef INET6
3434 /*
3435 * Same as above, but for IPv6.
3436 * Cut-and-pasted from ip6_input.c.
3437 * XXX Should we update ip6stat, or not?
3438 */
3439 static int
3440 bridge_ip6_checkbasic(struct mbuf **mp)
3441 {
3442 struct mbuf *m = *mp;
3443 struct ip6_hdr *ip6;
3444
3445 /*
3446 * If the IPv6 header is not aligned, slurp it up into a new
3447 * mbuf with space for link headers, in the event we forward
3448 * it. Otherwise, if it is aligned, make sure the entire base
3449 * IPv6 header is in the first mbuf of the chain.
3450 */
3451 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3452 struct ifnet *inifp = m->m_pkthdr.rcvif;
3453 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3454 (max_linkhdr + 3) & ~3)) == NULL) {
3455 /* XXXJRT new stat, please */
3456 IP6STAT_INC(ip6s_toosmall);
3457 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3458 goto bad;
3459 }
3460 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3461 struct ifnet *inifp = m->m_pkthdr.rcvif;
3462 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3463 IP6STAT_INC(ip6s_toosmall);
3464 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3465 goto bad;
3466 }
3467 }
3468
3469 ip6 = mtod(m, struct ip6_hdr *);
3470
3471 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3472 IP6STAT_INC(ip6s_badvers);
3473 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3474 goto bad;
3475 }
3476
3477 /* Checks out, proceed */
3478 *mp = m;
3479 return (0);
3480
3481 bad:
3482 *mp = m;
3483 return (-1);
3484 }
3485 #endif /* INET6 */
3486
3487 /*
3488 * bridge_fragment:
3489 *
3490 * Fragment mbuf chain in multiple packets and prepend ethernet header.
3491 */
3492 static int
3493 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3494 int snap, struct llc *llc)
3495 {
3496 struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3497 struct ip *ip;
3498 int error = -1;
3499
3500 if (m->m_len < sizeof(struct ip) &&
3501 (m = m_pullup(m, sizeof(struct ip))) == NULL)
3502 goto dropit;
3503 ip = mtod(m, struct ip *);
3504
3505 m->m_pkthdr.csum_flags |= CSUM_IP;
3506 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3507 if (error)
3508 goto dropit;
3509
3510 /*
3511 * Walk the chain and re-add the Ethernet header for
3512 * each mbuf packet.
3513 */
3514 for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3515 nextpkt = mcur->m_nextpkt;
3516 mcur->m_nextpkt = NULL;
3517 if (snap) {
3518 M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3519 if (mcur == NULL) {
3520 error = ENOBUFS;
3521 if (mprev != NULL)
3522 mprev->m_nextpkt = nextpkt;
3523 goto dropit;
3524 }
3525 bcopy(llc, mtod(mcur, caddr_t), sizeof(struct llc));
3526 }
3527
3528 M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3529 if (mcur == NULL) {
3530 error = ENOBUFS;
3531 if (mprev != NULL)
3532 mprev->m_nextpkt = nextpkt;
3533 goto dropit;
3534 }
3535 bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3536
3537 /*
3538 * The previous two M_PREPEND could have inserted one or two
3539 * mbufs in front so we have to update the previous packet's
3540 * m_nextpkt.
3541 */
3542 mcur->m_nextpkt = nextpkt;
3543 if (mprev != NULL)
3544 mprev->m_nextpkt = mcur;
3545 else {
3546 /* The first mbuf in the original chain needs to be
3547 * updated. */
3548 *mp = mcur;
3549 }
3550 mprev = mcur;
3551 }
3552
3553 KMOD_IPSTAT_INC(ips_fragmented);
3554 return (error);
3555
3556 dropit:
3557 for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3558 m = mcur->m_nextpkt;
3559 m_freem(mcur);
3560 }
3561 return (error);
3562 }
3563
3564 static void
3565 bridge_linkstate(struct ifnet *ifp)
3566 {
3567 struct bridge_softc *sc = ifp->if_bridge;
3568 struct bridge_iflist *bif;
3569
3570 BRIDGE_LOCK(sc);
3571 bif = bridge_lookup_member_if(sc, ifp);
3572 if (bif == NULL) {
3573 BRIDGE_UNLOCK(sc);
3574 return;
3575 }
3576 bridge_linkcheck(sc);
3577 BRIDGE_UNLOCK(sc);
3578
3579 bstp_linkstate(&bif->bif_stp);
3580 }
3581
3582 static void
3583 bridge_linkcheck(struct bridge_softc *sc)
3584 {
3585 struct bridge_iflist *bif;
3586 int new_link, hasls;
3587
3588 BRIDGE_LOCK_ASSERT(sc);
3589 new_link = LINK_STATE_DOWN;
3590 hasls = 0;
3591 /* Our link is considered up if at least one of our ports is active */
3592 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3593 if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3594 hasls++;
3595 if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3596 new_link = LINK_STATE_UP;
3597 break;
3598 }
3599 }
3600 if (!LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3601 /* If no interfaces support link-state then we default to up */
3602 new_link = LINK_STATE_UP;
3603 }
3604 if_link_state_change(sc->sc_ifp, new_link);
3605 }