FreeBSD/Linux Kernel Cross Reference
sys/net/if.c
1 /*-
2 * Copyright (c) 1980, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)if.c 8.5 (Berkeley) 1/9/95
30 * $FreeBSD: releng/8.2/sys/net/if.c 215342 2010-11-15 17:48:13Z sobomax $
31 */
32
33 #include "opt_compat.h"
34 #include "opt_inet6.h"
35 #include "opt_inet.h"
36
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/conf.h>
40 #include <sys/malloc.h>
41 #include <sys/sbuf.h>
42 #include <sys/bus.h>
43 #include <sys/mbuf.h>
44 #include <sys/systm.h>
45 #include <sys/priv.h>
46 #include <sys/proc.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/protosw.h>
50 #include <sys/kernel.h>
51 #include <sys/lock.h>
52 #include <sys/refcount.h>
53 #include <sys/module.h>
54 #include <sys/rwlock.h>
55 #include <sys/sockio.h>
56 #include <sys/syslog.h>
57 #include <sys/sysctl.h>
58 #include <sys/taskqueue.h>
59 #include <sys/domain.h>
60 #include <sys/jail.h>
61 #include <machine/stdarg.h>
62 #include <vm/uma.h>
63
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_clone.h>
67 #include <net/if_dl.h>
68 #include <net/if_types.h>
69 #include <net/if_var.h>
70 #include <net/radix.h>
71 #include <net/route.h>
72 #include <net/vnet.h>
73
74 #if defined(INET) || defined(INET6)
75 /*XXX*/
76 #include <netinet/in.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_carp.h>
79 #ifdef INET6
80 #include <netinet6/in6_var.h>
81 #include <netinet6/in6_ifattach.h>
82 #endif
83 #endif
84 #ifdef INET
85 #include <netinet/if_ether.h>
86 #endif
87
88 #include <security/mac/mac_framework.h>
89
90 #ifdef COMPAT_FREEBSD32
91 #include <sys/mount.h>
92 #include <compat/freebsd32/freebsd32.h>
93 #endif
94
95 struct ifindex_entry {
96 struct ifnet *ife_ifnet;
97 };
98
99 static int slowtimo_started;
100
101 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
102 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
103
104 TUNABLE_INT("net.link.ifqmaxlen", &ifqmaxlen);
105 SYSCTL_UINT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN,
106 &ifqmaxlen, 0, "max send queue size");
107
108 /* Log link state change events */
109 static int log_link_state_change = 1;
110
111 SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
112 &log_link_state_change, 0,
113 "log interface link state change events");
114
115 /* Interface description */
116 static unsigned int ifdescr_maxlen = 1024;
117 SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW,
118 &ifdescr_maxlen, 0,
119 "administrative maximum length for interface description");
120
121 MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions");
122
123 /* global sx for non-critical path ifdescr */
124 static struct sx ifdescr_sx;
125 SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr");
126
127 void (*bstp_linkstate_p)(struct ifnet *ifp, int state);
128 void (*ng_ether_link_state_p)(struct ifnet *ifp, int state);
129 void (*lagg_linkstate_p)(struct ifnet *ifp, int state);
130 /* These are external hooks for CARP. */
131 void (*carp_linkstate_p)(struct ifnet *ifp);
132 #if defined(INET) || defined(INET6)
133 struct ifnet *(*carp_forus_p)(struct ifnet *ifp, u_char *dhost);
134 int (*carp_output_p)(struct ifnet *ifp, struct mbuf *m,
135 struct sockaddr *sa, struct rtentry *rt);
136 #endif
137 #ifdef INET
138 int (*carp_iamatch_p)(struct ifnet *, struct in_ifaddr *, struct in_addr *,
139 u_int8_t **);
140 #endif
141 #ifdef INET6
142 struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6);
143 caddr_t (*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m,
144 const struct in6_addr *taddr);
145 #endif
146
147 struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;
148
149 /*
150 * XXX: Style; these should be sorted alphabetically, and unprototyped
151 * static functions should be prototyped. Currently they are sorted by
152 * declaration order.
153 */
154 static void if_attachdomain(void *);
155 static void if_attachdomain1(struct ifnet *);
156 static int ifconf(u_long, caddr_t);
157 static void if_freemulti(struct ifmultiaddr *);
158 static void if_init(void *);
159 static void if_grow(void);
160 static void if_check(void *);
161 static void if_route(struct ifnet *, int flag, int fam);
162 static int if_setflag(struct ifnet *, int, int, int *, int);
163 static void if_slowtimo(void *);
164 static int if_transmit(struct ifnet *ifp, struct mbuf *m);
165 static void if_unroute(struct ifnet *, int flag, int fam);
166 static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
167 static int if_rtdel(struct radix_node *, void *);
168 static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
169 static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
170 static void do_link_state_change(void *, int);
171 static int if_getgroup(struct ifgroupreq *, struct ifnet *);
172 static int if_getgroupmembers(struct ifgroupreq *);
173 static void if_delgroups(struct ifnet *);
174 static void if_attach_internal(struct ifnet *, int);
175 static void if_detach_internal(struct ifnet *, int);
176
177 #ifdef INET6
178 /*
179  * XXX: declared here to avoid including many inet6-related files;
180  * should this be more generalized?
181 */
182 extern void nd6_setmtu(struct ifnet *);
183 #endif
184
185 VNET_DEFINE(int, if_index);
186 int ifqmaxlen = IFQ_MAXLEN;
187 VNET_DEFINE(struct ifnethead, ifnet); /* depend on static init XXX */
188 VNET_DEFINE(struct ifgrouphead, ifg_head);
189
190 static VNET_DEFINE(int, if_indexlim) = 8;
191
192 /* Table of ifnet by index. */
193 VNET_DEFINE(struct ifindex_entry *, ifindex_table);
194
195 #define V_if_indexlim VNET(if_indexlim)
196 #define V_ifindex_table VNET(ifindex_table)
197
198 /*
199 * The global network interface list (V_ifnet) and related state (such as
200 * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and
201  * an rwlock.  Either may be acquired shared to stabilize the list, but both
202  * must be acquired writable to modify the list.  This model allows us to
203  * stabilize the interface list both during interrupt thread processing and
204  * over long-running ioctls, without introducing priority inversions and
205  * deadlocks.
206 */
207 struct rwlock ifnet_rwlock;
208 struct sx ifnet_sxlock;
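/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  A typical read-side consumer stabilizes the interface list with
 * a shared acquisition before walking it, much as ifunit() does further
 * down in this file:
 *
 *	struct ifnet *ifp;
 *	int count = 0;
 *
 *	IFNET_RLOCK_NOSLEEP();
 *	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
 *		count++;
 *	IFNET_RUNLOCK_NOSLEEP();
 */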
209
210 /*
211 * The allocation of network interfaces is a rather non-atomic affair; we
212 * need to select an index before we are ready to expose the interface for
213  * use, so we will use this pointer value to indicate reservation.
214 */
215 #define IFNET_HOLD (void *)(uintptr_t)(-1)
216
217 static if_com_alloc_t *if_com_alloc[256];
218 static if_com_free_t *if_com_free[256];
219
220 /*
221 * System initialization
222 */
223 SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_check, NULL);
224
225 MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
226 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
227 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
228
229 struct ifnet *
230 ifnet_byindex_locked(u_short idx)
231 {
232
233 if (idx > V_if_index)
234 return (NULL);
235 if (V_ifindex_table[idx].ife_ifnet == IFNET_HOLD)
236 return (NULL);
237 return (V_ifindex_table[idx].ife_ifnet);
238 }
239
240 struct ifnet *
241 ifnet_byindex(u_short idx)
242 {
243 struct ifnet *ifp;
244
245 IFNET_RLOCK_NOSLEEP();
246 ifp = ifnet_byindex_locked(idx);
247 IFNET_RUNLOCK_NOSLEEP();
248 return (ifp);
249 }
250
251 struct ifnet *
252 ifnet_byindex_ref(u_short idx)
253 {
254 struct ifnet *ifp;
255
256 IFNET_RLOCK_NOSLEEP();
257 ifp = ifnet_byindex_locked(idx);
258 if (ifp == NULL || (ifp->if_flags & IFF_DYING)) {
259 IFNET_RUNLOCK_NOSLEEP();
260 return (NULL);
261 }
262 if_ref(ifp);
263 IFNET_RUNLOCK_NOSLEEP();
264 return (ifp);
265 }
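/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  ifnet_byindex_ref() returns a referenced ifnet, so a caller is
 * expected to drop that reference with if_rele() once it is done; the
 * error value chosen here is hypothetical:
 *
 *	struct ifnet *ifp;
 *
 *	ifp = ifnet_byindex_ref(idx);
 *	if (ifp == NULL)
 *		return (ENXIO);
 *	... use ifp; it cannot be freed out from under us ...
 *	if_rele(ifp);
 */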
266
267 /*
268 * Allocate an ifindex array entry; return 0 on success or an error on
269 * failure.
270 */
271 static int
272 ifindex_alloc_locked(u_short *idxp)
273 {
274 u_short idx;
275
276 IFNET_WLOCK_ASSERT();
277
278 /*
279 * Try to find an empty slot below V_if_index. If we fail, take the
280 * next slot.
281 */
282 for (idx = 1; idx <= V_if_index; idx++) {
283 if (V_ifindex_table[idx].ife_ifnet == NULL)
284 break;
285 }
286
287 /* Catch if_index overflow. */
288 if (idx < 1)
289 return (ENOSPC);
290 if (idx > V_if_index)
291 V_if_index = idx;
292 if (V_if_index >= V_if_indexlim)
293 if_grow();
294 *idxp = idx;
295 return (0);
296 }
297
298 static void
299 ifindex_free_locked(u_short idx)
300 {
301
302 IFNET_WLOCK_ASSERT();
303
304 V_ifindex_table[idx].ife_ifnet = NULL;
305 while (V_if_index > 0 &&
306 V_ifindex_table[V_if_index].ife_ifnet == NULL)
307 V_if_index--;
308 }
309
310 static void
311 ifindex_free(u_short idx)
312 {
313
314 IFNET_WLOCK();
315 ifindex_free_locked(idx);
316 IFNET_WUNLOCK();
317 }
318
319 static void
320 ifnet_setbyindex_locked(u_short idx, struct ifnet *ifp)
321 {
322
323 IFNET_WLOCK_ASSERT();
324
325 V_ifindex_table[idx].ife_ifnet = ifp;
326 }
327
328 static void
329 ifnet_setbyindex(u_short idx, struct ifnet *ifp)
330 {
331
332 IFNET_WLOCK();
333 ifnet_setbyindex_locked(idx, ifp);
334 IFNET_WUNLOCK();
335 }
336
337 struct ifaddr *
338 ifaddr_byindex(u_short idx)
339 {
340 struct ifaddr *ifa;
341
342 IFNET_RLOCK_NOSLEEP();
343 ifa = ifnet_byindex_locked(idx)->if_addr;
344 if (ifa != NULL)
345 ifa_ref(ifa);
346 IFNET_RUNLOCK_NOSLEEP();
347 return (ifa);
348 }
349
350 /*
351 * Network interface utility routines.
352 *
353 * Routines with ifa_ifwith* names take sockaddr *'s as
354 * parameters.
355 */
356
357 static void
358 vnet_if_init(const void *unused __unused)
359 {
360
361 TAILQ_INIT(&V_ifnet);
362 TAILQ_INIT(&V_ifg_head);
363 if_grow(); /* create initial table */
364 vnet_if_clone_init();
365 }
366 VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_FIRST, vnet_if_init,
367 NULL);
368
369 /* ARGSUSED*/
370 static void
371 if_init(void *dummy __unused)
372 {
373
374 IFNET_LOCK_INIT();
375 if_clone_init();
376 }
377 SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_SECOND, if_init, NULL);
378
379
380 #ifdef VIMAGE
381 static void
382 vnet_if_uninit(const void *unused __unused)
383 {
384
385 VNET_ASSERT(TAILQ_EMPTY(&V_ifnet));
386 VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head));
387
388 free((caddr_t)V_ifindex_table, M_IFNET);
389 }
390 VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
391 vnet_if_uninit, NULL);
392 #endif
393
394 static void
395 if_grow(void)
396 {
397 u_int n;
398 struct ifindex_entry *e;
399
400 V_if_indexlim <<= 1;
401 n = V_if_indexlim * sizeof(*e);
402 e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
403 if (V_ifindex_table != NULL) {
404 memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2);
405 free((caddr_t)V_ifindex_table, M_IFNET);
406 }
407 V_ifindex_table = e;
408 }
409
410 static void
411 if_check(void *dummy __unused)
412 {
413
414 /*
415 * If at least one interface added during boot uses
416 * if_watchdog then start the timer.
417 */
418 if (slowtimo_started)
419 if_slowtimo(0);
420 }
421
422 /*
423 * Allocate a struct ifnet and an index for an interface. A layer 2
424 * common structure will also be allocated if an allocation routine is
425 * registered for the passed type.
426 */
427 struct ifnet *
428 if_alloc(u_char type)
429 {
430 struct ifnet *ifp;
431 u_short idx;
432
433 ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
434 IFNET_WLOCK();
435 if (ifindex_alloc_locked(&idx) != 0) {
436 IFNET_WUNLOCK();
437 free(ifp, M_IFNET);
438 return (NULL);
439 }
440 ifnet_setbyindex_locked(idx, IFNET_HOLD);
441 IFNET_WUNLOCK();
442 ifp->if_index = idx;
443 ifp->if_type = type;
444 ifp->if_alloctype = type;
445 if (if_com_alloc[type] != NULL) {
446 ifp->if_l2com = if_com_alloc[type](type, ifp);
447 if (ifp->if_l2com == NULL) {
448 free(ifp, M_IFNET);
449 ifindex_free(idx);
450 return (NULL);
451 }
452 }
453
454 IF_ADDR_LOCK_INIT(ifp);
455 TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
456 ifp->if_afdata_initialized = 0;
457 IF_AFDATA_LOCK_INIT(ifp);
458 TAILQ_INIT(&ifp->if_addrhead);
459 TAILQ_INIT(&ifp->if_prefixhead);
460 TAILQ_INIT(&ifp->if_multiaddrs);
461 TAILQ_INIT(&ifp->if_groups);
462 #ifdef MAC
463 mac_ifnet_init(ifp);
464 #endif
465 ifq_init(&ifp->if_snd, ifp);
466
467 refcount_init(&ifp->if_refcount, 1); /* Index reference. */
468 ifnet_setbyindex(ifp->if_index, ifp);
469 return (ifp);
470 }
471
472 /*
473 * Do the actual work of freeing a struct ifnet, associated index, and layer
474 * 2 common structure. This call is made when the last reference to an
475 * interface is released.
476 */
477 static void
478 if_free_internal(struct ifnet *ifp)
479 {
480
481 KASSERT((ifp->if_flags & IFF_DYING),
482 ("if_free_internal: interface not dying"));
483
484 IFNET_WLOCK();
485 KASSERT(ifp == ifnet_byindex_locked(ifp->if_index),
486 ("%s: freeing unallocated ifnet", ifp->if_xname));
487
488 ifindex_free_locked(ifp->if_index);
489 IFNET_WUNLOCK();
490
491 if (if_com_free[ifp->if_alloctype] != NULL)
492 if_com_free[ifp->if_alloctype](ifp->if_l2com,
493 ifp->if_alloctype);
494
495 #ifdef MAC
496 mac_ifnet_destroy(ifp);
497 #endif /* MAC */
498 if (ifp->if_description != NULL)
499 free(ifp->if_description, M_IFDESCR);
500 IF_AFDATA_DESTROY(ifp);
501 IF_ADDR_LOCK_DESTROY(ifp);
502 ifq_delete(&ifp->if_snd);
503 free(ifp, M_IFNET);
504 }
505
506 /*
507  * This version should only be called by interfaces that switch their type
508 * after calling if_alloc(). if_free_type() will go away again now that we
509 * have if_alloctype to cache the original allocation type. For now, assert
510 * that they match, since we require that in practice.
511 */
512 void
513 if_free_type(struct ifnet *ifp, u_char type)
514 {
515
516 KASSERT(ifp->if_alloctype == type,
517 ("if_free_type: type (%d) != alloctype (%d)", type,
518 ifp->if_alloctype));
519
520 ifp->if_flags |= IFF_DYING; /* XXX: Locking */
521 if (!refcount_release(&ifp->if_refcount))
522 return;
523 if_free_internal(ifp);
524 }
525
526 /*
527 * This is the normal version of if_free(), used by device drivers to free a
528 * detached network interface. The contents of if_free_type() will move into
529 * here when if_free_type() goes away.
530 */
531 void
532 if_free(struct ifnet *ifp)
533 {
534
535 if_free_type(ifp, ifp->if_alloctype);
536 }
537
538 /*
539 * Interfaces to keep an ifnet type-stable despite the possibility of the
540 * driver calling if_free(). If there are additional references, we defer
541 * freeing the underlying data structure.
542 */
543 void
544 if_ref(struct ifnet *ifp)
545 {
546
547 /* We don't assert the ifnet list lock here, but arguably should. */
548 refcount_acquire(&ifp->if_refcount);
549 }
550
551 void
552 if_rele(struct ifnet *ifp)
553 {
554
555 if (!refcount_release(&ifp->if_refcount))
556 return;
557 if_free_internal(ifp);
558 }
559
560 void
561 ifq_init(struct ifaltq *ifq, struct ifnet *ifp)
562 {
563
564 mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);
565
566 if (ifq->ifq_maxlen == 0)
567 ifq->ifq_maxlen = ifqmaxlen;
568
569 ifq->altq_type = 0;
570 ifq->altq_disc = NULL;
571 ifq->altq_flags &= ALTQF_CANTCHANGE;
572 ifq->altq_tbr = NULL;
573 ifq->altq_ifp = ifp;
574 }
575
576 void
577 ifq_delete(struct ifaltq *ifq)
578 {
579 mtx_destroy(&ifq->ifq_mtx);
580 }
581
582 /*
583  * Perform generic interface initialization tasks and attach the interface
584  * to the list of "active" interfaces.  If the vmove flag is set on entry
585  * to if_attach_internal(), perform only a limited subset of initialization
586  * tasks, given that we are moving an ifnet that has already been fully
587  * initialized from one vnet to another.
588 *
589 * XXX:
590 * - The decision to return void and thus require this function to
591 * succeed is questionable.
592 * - We should probably do more sanity checking. For instance we don't
593 * do anything to insure if_xname is unique or non-empty.
594 */
595 void
596 if_attach(struct ifnet *ifp)
597 {
598
599 if_attach_internal(ifp, 0);
600 }
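/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  A driver's attach path pairs if_alloc() with if_attach()
 * (Ethernet drivers usually go through ether_ifattach(), which calls
 * if_attach() itself), and its detach path pairs if_detach() with
 * if_free().  The "foo" names and the error value are hypothetical:
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	if (ifp == NULL)
 *		return (ENOSPC);
 *	ifp->if_softc = sc;
 *	if_initname(ifp, "foo", unit);
 *	ifp->if_ioctl = foo_ioctl;
 *	ifp->if_start = foo_start;
 *	ifp->if_init = foo_init;
 *	if_attach(ifp);
 *	...
 *	if_detach(ifp);
 *	if_free(ifp);
 */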
601
602 static void
603 if_attach_internal(struct ifnet *ifp, int vmove)
604 {
605 unsigned socksize, ifasize;
606 int namelen, masklen;
607 struct sockaddr_dl *sdl;
608 struct ifaddr *ifa;
609
610 if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
611 panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
612 ifp->if_xname);
613
614 #ifdef VIMAGE
615 ifp->if_vnet = curvnet;
616 if (ifp->if_home_vnet == NULL)
617 ifp->if_home_vnet = curvnet;
618 #endif
619
620 if_addgroup(ifp, IFG_ALL);
621
622 getmicrotime(&ifp->if_lastchange);
623 ifp->if_data.ifi_epoch = time_uptime;
624 ifp->if_data.ifi_datalen = sizeof(struct if_data);
625
626 KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) ||
627 (ifp->if_transmit != NULL && ifp->if_qflush != NULL),
628 ("transmit and qflush must both either be set or both be NULL"));
629 if (ifp->if_transmit == NULL) {
630 ifp->if_transmit = if_transmit;
631 ifp->if_qflush = if_qflush;
632 }
633
634 if (!vmove) {
635 #ifdef MAC
636 mac_ifnet_create(ifp);
637 #endif
638
639 /*
640 * Create a Link Level name for this device.
641 */
642 namelen = strlen(ifp->if_xname);
643 /*
644 		 * Always save enough space for any possible name so we
645 * can do a rename in place later.
646 */
647 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
648 socksize = masklen + ifp->if_addrlen;
649 if (socksize < sizeof(*sdl))
650 socksize = sizeof(*sdl);
651 socksize = roundup2(socksize, sizeof(long));
652 ifasize = sizeof(*ifa) + 2 * socksize;
653 ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
654 ifa_init(ifa);
655 sdl = (struct sockaddr_dl *)(ifa + 1);
656 sdl->sdl_len = socksize;
657 sdl->sdl_family = AF_LINK;
658 bcopy(ifp->if_xname, sdl->sdl_data, namelen);
659 sdl->sdl_nlen = namelen;
660 sdl->sdl_index = ifp->if_index;
661 sdl->sdl_type = ifp->if_type;
662 ifp->if_addr = ifa;
663 ifa->ifa_ifp = ifp;
664 ifa->ifa_rtrequest = link_rtrequest;
665 ifa->ifa_addr = (struct sockaddr *)sdl;
666 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
667 ifa->ifa_netmask = (struct sockaddr *)sdl;
668 sdl->sdl_len = masklen;
669 while (namelen != 0)
670 sdl->sdl_data[--namelen] = 0xff;
671 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
672 /* Reliably crash if used uninitialized. */
673 ifp->if_broadcastaddr = NULL;
674 }
675 #ifdef VIMAGE
676 else {
677 /*
678 * Update the interface index in the link layer address
679 * of the interface.
680 */
681 for (ifa = ifp->if_addr; ifa != NULL;
682 ifa = TAILQ_NEXT(ifa, ifa_link)) {
683 if (ifa->ifa_addr->sa_family == AF_LINK) {
684 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
685 sdl->sdl_index = ifp->if_index;
686 }
687 }
688 }
689 #endif
690
691 IFNET_WLOCK();
692 TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link);
693 #ifdef VIMAGE
694 curvnet->vnet_ifcnt++;
695 #endif
696 IFNET_WUNLOCK();
697
698 if (domain_init_status >= 2)
699 if_attachdomain1(ifp);
700
701 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
702 if (IS_DEFAULT_VNET(curvnet))
703 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
704
705 /* Announce the interface. */
706 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
707
708 if (!vmove && ifp->if_watchdog != NULL) {
709 if_printf(ifp,
710 "WARNING: using obsoleted if_watchdog interface\n");
711
712 /*
713 * Note that we need if_slowtimo(). If this happens after
714 * boot, then call if_slowtimo() directly.
715 */
716 if (atomic_cmpset_int(&slowtimo_started, 0, 1) && !cold)
717 if_slowtimo(0);
718 }
719 }
720
721 static void
722 if_attachdomain(void *dummy)
723 {
724 struct ifnet *ifp;
725 int s;
726
727 s = splnet();
728 TAILQ_FOREACH(ifp, &V_ifnet, if_link)
729 if_attachdomain1(ifp);
730 splx(s);
731 }
732 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
733 if_attachdomain, NULL);
734
735 static void
736 if_attachdomain1(struct ifnet *ifp)
737 {
738 struct domain *dp;
739 int s;
740
741 s = splnet();
742
743 /*
744 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
745 	 * cannot entirely lock ifp->if_afdata initialization.
746 */
747 if (IF_AFDATA_TRYLOCK(ifp) == 0) {
748 splx(s);
749 return;
750 }
751 if (ifp->if_afdata_initialized >= domain_init_status) {
752 IF_AFDATA_UNLOCK(ifp);
753 splx(s);
754 printf("if_attachdomain called more than once on %s\n",
755 ifp->if_xname);
756 return;
757 }
758 ifp->if_afdata_initialized = domain_init_status;
759 IF_AFDATA_UNLOCK(ifp);
760
761 /* address family dependent data region */
762 bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
763 for (dp = domains; dp; dp = dp->dom_next) {
764 if (dp->dom_ifattach)
765 ifp->if_afdata[dp->dom_family] =
766 (*dp->dom_ifattach)(ifp);
767 }
768
769 splx(s);
770 }
771
772 /*
773 * Remove any unicast or broadcast network addresses from an interface.
774 */
775 void
776 if_purgeaddrs(struct ifnet *ifp)
777 {
778 struct ifaddr *ifa, *next;
779
780 TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
781 if (ifa->ifa_addr->sa_family == AF_LINK)
782 continue;
783 #ifdef INET
784 /* XXX: Ugly!! ad hoc just for INET */
785 if (ifa->ifa_addr->sa_family == AF_INET) {
786 struct ifaliasreq ifr;
787
788 bzero(&ifr, sizeof(ifr));
789 ifr.ifra_addr = *ifa->ifa_addr;
790 if (ifa->ifa_dstaddr)
791 ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
792 if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
793 NULL) == 0)
794 continue;
795 }
796 #endif /* INET */
797 #ifdef INET6
798 if (ifa->ifa_addr->sa_family == AF_INET6) {
799 in6_purgeaddr(ifa);
800 /* ifp_addrhead is already updated */
801 continue;
802 }
803 #endif /* INET6 */
804 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
805 ifa_free(ifa);
806 }
807 }
808
809 /*
810 * Remove any multicast network addresses from an interface when an ifnet
811 * is going away.
812 */
813 static void
814 if_purgemaddrs(struct ifnet *ifp)
815 {
816 struct ifmultiaddr *ifma;
817 struct ifmultiaddr *next;
818
819 IF_ADDR_LOCK(ifp);
820 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
821 if_delmulti_locked(ifp, ifma, 1);
822 IF_ADDR_UNLOCK(ifp);
823 }
824
825 /*
826 * Detach an interface, removing it from the list of "active" interfaces.
827 * If vmove flag is set on entry to if_detach_internal(), perform only a
828 * limited subset of cleanup tasks, given that we are moving an ifnet from
829 * one vnet to another, where it must be fully operational.
830 *
831 * XXXRW: There are some significant questions about event ordering, and
832 * how to prevent things from starting to use the interface during detach.
833 */
834 void
835 if_detach(struct ifnet *ifp)
836 {
837
838 if_detach_internal(ifp, 0);
839 }
840
841 static void
842 if_detach_internal(struct ifnet *ifp, int vmove)
843 {
844 struct ifaddr *ifa;
845 struct radix_node_head *rnh;
846 int i, j;
847 struct domain *dp;
848 struct ifnet *iter;
849 int found = 0;
850
851 IFNET_WLOCK();
852 TAILQ_FOREACH(iter, &V_ifnet, if_link)
853 if (iter == ifp) {
854 TAILQ_REMOVE(&V_ifnet, ifp, if_link);
855 found = 1;
856 break;
857 }
858 #ifdef VIMAGE
859 if (found)
860 curvnet->vnet_ifcnt--;
861 #endif
862 IFNET_WUNLOCK();
863 if (!found) {
864 if (vmove)
865 panic("%s: ifp=%p not on the ifnet tailq %p",
866 __func__, ifp, &V_ifnet);
867 else
868 return; /* XXX this should panic as well? */
869 }
870
871 /*
872 * Remove/wait for pending events.
873 */
874 taskqueue_drain(taskqueue_swi, &ifp->if_linktask);
875
876 /*
877 * Remove routes and flush queues.
878 */
879 if_down(ifp);
880 #ifdef ALTQ
881 if (ALTQ_IS_ENABLED(&ifp->if_snd))
882 altq_disable(&ifp->if_snd);
883 if (ALTQ_IS_ATTACHED(&ifp->if_snd))
884 altq_detach(&ifp->if_snd);
885 #endif
886
887 if_purgeaddrs(ifp);
888
889 #ifdef INET
890 in_ifdetach(ifp);
891 #endif
892
893 #ifdef INET6
894 /*
895 * Remove all IPv6 kernel structs related to ifp. This should be done
896 * before removing routing entries below, since IPv6 interface direct
897 * routes are expected to be removed by the IPv6-specific kernel API.
898 	 * Otherwise, the kernel will detect the inconsistency and complain about it.
899 */
900 in6_ifdetach(ifp);
901 #endif
902 if_purgemaddrs(ifp);
903
904 if (!vmove) {
905 /*
906 * Prevent further calls into the device driver via ifnet.
907 */
908 if_dead(ifp);
909
910 /*
911 * Remove link ifaddr pointer and maybe decrement if_index.
912 * Clean up all addresses.
913 */
914 ifp->if_addr = NULL;
915
916 /* We can now free link ifaddr. */
917 if (!TAILQ_EMPTY(&ifp->if_addrhead)) {
918 ifa = TAILQ_FIRST(&ifp->if_addrhead);
919 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
920 ifa_free(ifa);
921 }
922 }
923
924 /*
925 * Delete all remaining routes using this interface
926 	 * Unfortunately, the only way to do this is to slog through
927 * the entire routing table looking for routes which point
928 * to this interface...oh well...
929 */
930 for (i = 1; i <= AF_MAX; i++) {
931 for (j = 0; j < rt_numfibs; j++) {
932 rnh = rt_tables_get_rnh(j, i);
933 if (rnh == NULL)
934 continue;
935 RADIX_NODE_HEAD_LOCK(rnh);
936 (void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
937 RADIX_NODE_HEAD_UNLOCK(rnh);
938 }
939 }
940
941 /* Announce that the interface is gone. */
942 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
943 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
944 if (IS_DEFAULT_VNET(curvnet))
945 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
946 if_delgroups(ifp);
947
948 /*
949 * We cannot hold the lock over dom_ifdetach calls as they might
950 	 * sleep, for example trying to drain a callout, thus opening up the
951 * theoretical race with re-attaching.
952 */
953 IF_AFDATA_LOCK(ifp);
954 i = ifp->if_afdata_initialized;
955 ifp->if_afdata_initialized = 0;
956 IF_AFDATA_UNLOCK(ifp);
957 for (dp = domains; i > 0 && dp; dp = dp->dom_next) {
958 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
959 (*dp->dom_ifdetach)(ifp,
960 ifp->if_afdata[dp->dom_family]);
961 }
962 }
963
964 #ifdef VIMAGE
965 /*
966 * if_vmove() performs a limited version of if_detach() in current
967 * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg.
968  * An attempt is made to shrink if_index in the current vnet, find an
969  * unused if_index in the target vnet (calling if_grow() if necessary),
970  * and finally find an unused if_xname for the target vnet.
971 */
972 void
973 if_vmove(struct ifnet *ifp, struct vnet *new_vnet)
974 {
975 u_short idx;
976
977 /*
978 * Detach from current vnet, but preserve LLADDR info, do not
979 * mark as dead etc. so that the ifnet can be reattached later.
980 */
981 if_detach_internal(ifp, 1);
982
983 /*
984 * Unlink the ifnet from ifindex_table[] in current vnet, and shrink
985 * the if_index for that vnet if possible.
986 *
987 * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized,
988 * or we'd lock on one vnet and unlock on another.
989 */
990 IFNET_WLOCK();
991 ifindex_free_locked(ifp->if_index);
992 IFNET_WUNLOCK();
993
994 /*
995 * Perform interface-specific reassignment tasks, if provided by
996 * the driver.
997 */
998 if (ifp->if_reassign != NULL)
999 ifp->if_reassign(ifp, new_vnet, NULL);
1000
1001 /*
1002 * Switch to the context of the target vnet.
1003 */
1004 CURVNET_SET_QUIET(new_vnet);
1005
1006 IFNET_WLOCK();
1007 if (ifindex_alloc_locked(&idx) != 0) {
1008 IFNET_WUNLOCK();
1009 panic("if_index overflow");
1010 }
1011 ifp->if_index = idx;
1012 ifnet_setbyindex_locked(ifp->if_index, ifp);
1013 IFNET_WUNLOCK();
1014
1015 if_attach_internal(ifp, 1);
1016
1017 CURVNET_RESTORE();
1018 }
1019
1020 /*
1021 * Move an ifnet to or from another child prison/vnet, specified by the jail id.
1022 */
1023 static int
1024 if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
1025 {
1026 struct prison *pr;
1027 struct ifnet *difp;
1028
1029 /* Try to find the prison within our visibility. */
1030 sx_slock(&allprison_lock);
1031 pr = prison_find_child(td->td_ucred->cr_prison, jid);
1032 sx_sunlock(&allprison_lock);
1033 if (pr == NULL)
1034 return (ENXIO);
1035 prison_hold_locked(pr);
1036 mtx_unlock(&pr->pr_mtx);
1037
1038 /* Do not try to move the iface from and to the same prison. */
1039 if (pr->pr_vnet == ifp->if_vnet) {
1040 prison_free(pr);
1041 return (EEXIST);
1042 }
1043
1044 	/* Make sure the named iface does not exist in the destination prison/vnet. */
1045 /* XXX Lock interfaces to avoid races. */
1046 CURVNET_SET_QUIET(pr->pr_vnet);
1047 difp = ifunit(ifname);
1048 CURVNET_RESTORE();
1049 if (difp != NULL) {
1050 prison_free(pr);
1051 return (EEXIST);
1052 }
1053
1054 /* Move the interface into the child jail/vnet. */
1055 if_vmove(ifp, pr->pr_vnet);
1056
1057 /* Report the new if_xname back to the userland. */
1058 sprintf(ifname, "%s", ifp->if_xname);
1059
1060 prison_free(pr);
1061 return (0);
1062 }
1063
1064 static int
1065 if_vmove_reclaim(struct thread *td, char *ifname, int jid)
1066 {
1067 struct prison *pr;
1068 struct vnet *vnet_dst;
1069 struct ifnet *ifp;
1070
1071 /* Try to find the prison within our visibility. */
1072 sx_slock(&allprison_lock);
1073 pr = prison_find_child(td->td_ucred->cr_prison, jid);
1074 sx_sunlock(&allprison_lock);
1075 if (pr == NULL)
1076 return (ENXIO);
1077 prison_hold_locked(pr);
1078 mtx_unlock(&pr->pr_mtx);
1079
1080 /* Make sure the named iface exists in the source prison/vnet. */
1081 CURVNET_SET(pr->pr_vnet);
1082 ifp = ifunit(ifname); /* XXX Lock to avoid races. */
1083 if (ifp == NULL) {
1084 CURVNET_RESTORE();
1085 prison_free(pr);
1086 return (ENXIO);
1087 }
1088
1089 /* Do not try to move the iface from and to the same prison. */
1090 vnet_dst = TD_TO_VNET(td);
1091 if (vnet_dst == ifp->if_vnet) {
1092 CURVNET_RESTORE();
1093 prison_free(pr);
1094 return (EEXIST);
1095 }
1096
1097 /* Get interface back from child jail/vnet. */
1098 if_vmove(ifp, vnet_dst);
1099 CURVNET_RESTORE();
1100
1101 /* Report the new if_xname back to the userland. */
1102 sprintf(ifname, "%s", ifp->if_xname);
1103
1104 prison_free(pr);
1105 return (0);
1106 }
1107 #endif /* VIMAGE */
1108
1109 /*
1110 * Add a group to an interface
1111 */
1112 int
1113 if_addgroup(struct ifnet *ifp, const char *groupname)
1114 {
1115 struct ifg_list *ifgl;
1116 struct ifg_group *ifg = NULL;
1117 struct ifg_member *ifgm;
1118
1119 	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
1120 groupname[strlen(groupname) - 1] <= '9')
1121 return (EINVAL);
1122
1123 IFNET_WLOCK();
1124 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
1125 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
1126 IFNET_WUNLOCK();
1127 return (EEXIST);
1128 }
1129
1130 if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
1131 M_NOWAIT)) == NULL) {
1132 IFNET_WUNLOCK();
1133 return (ENOMEM);
1134 }
1135
1136 if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
1137 M_TEMP, M_NOWAIT)) == NULL) {
1138 free(ifgl, M_TEMP);
1139 IFNET_WUNLOCK();
1140 return (ENOMEM);
1141 }
1142
1143 TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
1144 if (!strcmp(ifg->ifg_group, groupname))
1145 break;
1146
1147 if (ifg == NULL) {
1148 if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
1149 M_TEMP, M_NOWAIT)) == NULL) {
1150 free(ifgl, M_TEMP);
1151 free(ifgm, M_TEMP);
1152 IFNET_WUNLOCK();
1153 return (ENOMEM);
1154 }
1155 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
1156 ifg->ifg_refcnt = 0;
1157 TAILQ_INIT(&ifg->ifg_members);
1158 EVENTHANDLER_INVOKE(group_attach_event, ifg);
1159 TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next);
1160 }
1161
1162 ifg->ifg_refcnt++;
1163 ifgl->ifgl_group = ifg;
1164 ifgm->ifgm_ifp = ifp;
1165
1166 IF_ADDR_LOCK(ifp);
1167 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
1168 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
1169 IF_ADDR_UNLOCK(ifp);
1170
1171 IFNET_WUNLOCK();
1172
1173 EVENTHANDLER_INVOKE(group_change_event, groupname);
1174
1175 return (0);
1176 }
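/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  Group names may not end in a digit (such names are reserved for
 * interfaces themselves), so a caller might do:
 *
 *	error = if_addgroup(ifp, "egress");
 *	if (error != 0 && error != EEXIST)
 *		return (error);
 */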
1177
1178 /*
1179 * Remove a group from an interface
1180 */
1181 int
1182 if_delgroup(struct ifnet *ifp, const char *groupname)
1183 {
1184 struct ifg_list *ifgl;
1185 struct ifg_member *ifgm;
1186
1187 IFNET_WLOCK();
1188 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
1189 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
1190 break;
1191 if (ifgl == NULL) {
1192 IFNET_WUNLOCK();
1193 return (ENOENT);
1194 }
1195
1196 IF_ADDR_LOCK(ifp);
1197 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
1198 IF_ADDR_UNLOCK(ifp);
1199
1200 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
1201 if (ifgm->ifgm_ifp == ifp)
1202 break;
1203
1204 if (ifgm != NULL) {
1205 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
1206 free(ifgm, M_TEMP);
1207 }
1208
1209 if (--ifgl->ifgl_group->ifg_refcnt == 0) {
1210 TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
1211 EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
1212 free(ifgl->ifgl_group, M_TEMP);
1213 }
1214 IFNET_WUNLOCK();
1215
1216 free(ifgl, M_TEMP);
1217
1218 EVENTHANDLER_INVOKE(group_change_event, groupname);
1219
1220 return (0);
1221 }
1222
1223 /*
1224 * Remove an interface from all groups
1225 */
1226 static void
1227 if_delgroups(struct ifnet *ifp)
1228 {
1229 struct ifg_list *ifgl;
1230 struct ifg_member *ifgm;
1231 char groupname[IFNAMSIZ];
1232
1233 IFNET_WLOCK();
1234 while (!TAILQ_EMPTY(&ifp->if_groups)) {
1235 ifgl = TAILQ_FIRST(&ifp->if_groups);
1236
1237 strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ);
1238
1239 IF_ADDR_LOCK(ifp);
1240 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
1241 IF_ADDR_UNLOCK(ifp);
1242
1243 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
1244 if (ifgm->ifgm_ifp == ifp)
1245 break;
1246
1247 if (ifgm != NULL) {
1248 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm,
1249 ifgm_next);
1250 free(ifgm, M_TEMP);
1251 }
1252
1253 if (--ifgl->ifgl_group->ifg_refcnt == 0) {
1254 TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
1255 EVENTHANDLER_INVOKE(group_detach_event,
1256 ifgl->ifgl_group);
1257 free(ifgl->ifgl_group, M_TEMP);
1258 }
1259 IFNET_WUNLOCK();
1260
1261 free(ifgl, M_TEMP);
1262
1263 EVENTHANDLER_INVOKE(group_change_event, groupname);
1264
1265 IFNET_WLOCK();
1266 }
1267 IFNET_WUNLOCK();
1268 }
1269
1270 /*
1271 * Stores all groups from an interface in memory pointed
1272 * to by data
1273 */
1274 static int
1275 if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
1276 {
1277 int len, error;
1278 struct ifg_list *ifgl;
1279 struct ifg_req ifgrq, *ifgp;
1280 struct ifgroupreq *ifgr = data;
1281
1282 if (ifgr->ifgr_len == 0) {
1283 IF_ADDR_LOCK(ifp);
1284 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
1285 ifgr->ifgr_len += sizeof(struct ifg_req);
1286 IF_ADDR_UNLOCK(ifp);
1287 return (0);
1288 }
1289
1290 len = ifgr->ifgr_len;
1291 ifgp = ifgr->ifgr_groups;
1292 /* XXX: wire */
1293 IF_ADDR_LOCK(ifp);
1294 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
1295 if (len < sizeof(ifgrq)) {
1296 IF_ADDR_UNLOCK(ifp);
1297 return (EINVAL);
1298 }
1299 bzero(&ifgrq, sizeof ifgrq);
1300 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
1301 sizeof(ifgrq.ifgrq_group));
1302 if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
1303 IF_ADDR_UNLOCK(ifp);
1304 return (error);
1305 }
1306 len -= sizeof(ifgrq);
1307 ifgp++;
1308 }
1309 IF_ADDR_UNLOCK(ifp);
1310
1311 return (0);
1312 }
1313
1314 /*
1315 * Stores all members of a group in memory pointed to by data
1316 */
1317 static int
1318 if_getgroupmembers(struct ifgroupreq *data)
1319 {
1320 struct ifgroupreq *ifgr = data;
1321 struct ifg_group *ifg;
1322 struct ifg_member *ifgm;
1323 struct ifg_req ifgrq, *ifgp;
1324 int len, error;
1325
1326 IFNET_RLOCK();
1327 TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
1328 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
1329 break;
1330 if (ifg == NULL) {
1331 IFNET_RUNLOCK();
1332 return (ENOENT);
1333 }
1334
1335 if (ifgr->ifgr_len == 0) {
1336 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
1337 ifgr->ifgr_len += sizeof(ifgrq);
1338 IFNET_RUNLOCK();
1339 return (0);
1340 }
1341
1342 len = ifgr->ifgr_len;
1343 ifgp = ifgr->ifgr_groups;
1344 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
1345 if (len < sizeof(ifgrq)) {
1346 IFNET_RUNLOCK();
1347 return (EINVAL);
1348 }
1349 bzero(&ifgrq, sizeof ifgrq);
1350 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
1351 sizeof(ifgrq.ifgrq_member));
1352 if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
1353 IFNET_RUNLOCK();
1354 return (error);
1355 }
1356 len -= sizeof(ifgrq);
1357 ifgp++;
1358 }
1359 IFNET_RUNLOCK();
1360
1361 return (0);
1362 }
1363
1364 /*
1365 * Delete Routes for a Network Interface
1366 *
1367 * Called for each routing entry via the rnh->rnh_walktree() call above
1368 * to delete all route entries referencing a detaching network interface.
1369 *
1370 * Arguments:
1371 * rn pointer to node in the routing table
1372 * arg argument passed to rnh->rnh_walktree() - detaching interface
1373 *
1374 * Returns:
1375 * 0 successful
1376 * errno failed - reason indicated
1377 *
1378 */
1379 static int
1380 if_rtdel(struct radix_node *rn, void *arg)
1381 {
1382 struct rtentry *rt = (struct rtentry *)rn;
1383 struct ifnet *ifp = arg;
1384 int err;
1385
1386 if (rt->rt_ifp == ifp) {
1387
1388 /*
1389 * Protect (sorta) against walktree recursion problems
1390 * with cloned routes
1391 */
1392 if ((rt->rt_flags & RTF_UP) == 0)
1393 return (0);
1394
1395 err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1396 rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED,
1397 (struct rtentry **) NULL, rt->rt_fibnum);
1398 if (err) {
1399 log(LOG_WARNING, "if_rtdel: error %d\n", err);
1400 }
1401 }
1402
1403 return (0);
1404 }
1405
1406 /*
1407 * Wrapper functions for struct ifnet address list locking macros. These are
1408 * used by kernel modules to avoid encoding programming interface or binary
1409 * interface assumptions that may be violated when kernel-internal locking
1410 * approaches change.
1411 */
1412 void
1413 if_addr_rlock(struct ifnet *ifp)
1414 {
1415
1416 IF_ADDR_LOCK(ifp);
1417 }
1418
1419 void
1420 if_addr_runlock(struct ifnet *ifp)
1421 {
1422
1423 IF_ADDR_UNLOCK(ifp);
1424 }
1425
1426 void
1427 if_maddr_rlock(struct ifnet *ifp)
1428 {
1429
1430 IF_ADDR_LOCK(ifp);
1431 }
1432
1433 void
1434 if_maddr_runlock(struct ifnet *ifp)
1435 {
1436
1437 IF_ADDR_UNLOCK(ifp);
1438 }
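/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  A kernel module walks the multicast address list under the
 * wrapper lock, for instance when reprogramming a hardware filter:
 *
 *	struct ifmultiaddr *ifma;
 *
 *	if_maddr_rlock(ifp);
 *	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 *		if (ifma->ifma_addr->sa_family != AF_LINK)
 *			continue;
 *		... program the filter from ifma->ifma_addr ...
 *	}
 *	if_maddr_runlock(ifp);
 */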
1439
1440 /*
1441 * Reference count functions for ifaddrs.
1442 */
1443 void
1444 ifa_init(struct ifaddr *ifa)
1445 {
1446
1447 mtx_init(&ifa->ifa_mtx, "ifaddr", NULL, MTX_DEF);
1448 refcount_init(&ifa->ifa_refcnt, 1);
1449 }
1450
1451 void
1452 ifa_ref(struct ifaddr *ifa)
1453 {
1454
1455 refcount_acquire(&ifa->ifa_refcnt);
1456 }
1457
1458 void
1459 ifa_free(struct ifaddr *ifa)
1460 {
1461
1462 if (refcount_release(&ifa->ifa_refcnt)) {
1463 mtx_destroy(&ifa->ifa_mtx);
1464 free(ifa, M_IFADDR);
1465 }
1466 }
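/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  Lookup routines such as ifa_ifwithaddr() below return a
 * referenced ifaddr, which the caller must release with ifa_free():
 *
 *	ifa = ifa_ifwithaddr(sa);
 *	if (ifa != NULL) {
 *		... use ifa ...
 *		ifa_free(ifa);
 *	}
 */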
1467
1468 int
1469 ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
1470 {
1471 int error = 0;
1472 struct rtentry *rt = NULL;
1473 struct rt_addrinfo info;
1474 static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
1475
1476 bzero(&info, sizeof(info));
1477 info.rti_ifp = V_loif;
1478 info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC;
1479 info.rti_info[RTAX_DST] = ia;
1480 info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
1481 error = rtrequest1_fib(RTM_ADD, &info, &rt, 0);
1482
1483 if (error == 0 && rt != NULL) {
1484 RT_LOCK(rt);
1485 ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type =
1486 ifa->ifa_ifp->if_type;
1487 ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index =
1488 ifa->ifa_ifp->if_index;
1489 RT_REMREF(rt);
1490 RT_UNLOCK(rt);
1491 } else if (error != 0)
1492 log(LOG_INFO, "ifa_add_loopback_route: insertion failed\n");
1493
1494 return (error);
1495 }
1496
1497 int
1498 ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
1499 {
1500 int error = 0;
1501 struct rt_addrinfo info;
1502 struct sockaddr_dl null_sdl;
1503
1504 bzero(&null_sdl, sizeof(null_sdl));
1505 null_sdl.sdl_len = sizeof(null_sdl);
1506 null_sdl.sdl_family = AF_LINK;
1507 null_sdl.sdl_type = ifa->ifa_ifp->if_type;
1508 null_sdl.sdl_index = ifa->ifa_ifp->if_index;
1509 bzero(&info, sizeof(info));
1510 info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC;
1511 info.rti_info[RTAX_DST] = ia;
1512 info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
1513 error = rtrequest1_fib(RTM_DELETE, &info, NULL, 0);
1514
1515 if (error != 0)
1516 log(LOG_INFO, "ifa_del_loopback_route: deletion failed\n");
1517
1518 return (error);
1519 }
1520
1521 /*
1522 * XXX: Because sockaddr_dl has deeper structure than the sockaddr
1523 * structs used to represent other address families, it is necessary
1524 * to perform a different comparison.
1525 */
1526
1527 #define sa_equal(a1, a2) \
1528 (bcmp((a1), (a2), ((a1))->sa_len) == 0)
1529
1530 #define sa_dl_equal(a1, a2) \
1531 ((((struct sockaddr_dl *)(a1))->sdl_len == \
1532 ((struct sockaddr_dl *)(a2))->sdl_len) && \
1533 (bcmp(LLADDR((struct sockaddr_dl *)(a1)), \
1534 LLADDR((struct sockaddr_dl *)(a2)), \
1535 ((struct sockaddr_dl *)(a1))->sdl_alen) == 0))
1536
1537 /*
1538 * Locate an interface based on a complete address.
1539 */
1540 /*ARGSUSED*/
1541 static struct ifaddr *
1542 ifa_ifwithaddr_internal(struct sockaddr *addr, int getref)
1543 {
1544 struct ifnet *ifp;
1545 struct ifaddr *ifa;
1546
1547 IFNET_RLOCK_NOSLEEP();
1548 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1549 IF_ADDR_LOCK(ifp);
1550 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1551 if (ifa->ifa_addr->sa_family != addr->sa_family)
1552 continue;
1553 if (sa_equal(addr, ifa->ifa_addr)) {
1554 if (getref)
1555 ifa_ref(ifa);
1556 IF_ADDR_UNLOCK(ifp);
1557 goto done;
1558 }
1559 /* IP6 doesn't have broadcast */
1560 if ((ifp->if_flags & IFF_BROADCAST) &&
1561 ifa->ifa_broadaddr &&
1562 ifa->ifa_broadaddr->sa_len != 0 &&
1563 sa_equal(ifa->ifa_broadaddr, addr)) {
1564 if (getref)
1565 ifa_ref(ifa);
1566 IF_ADDR_UNLOCK(ifp);
1567 goto done;
1568 }
1569 }
1570 IF_ADDR_UNLOCK(ifp);
1571 }
1572 ifa = NULL;
1573 done:
1574 IFNET_RUNLOCK_NOSLEEP();
1575 return (ifa);
1576 }
1577
1578 struct ifaddr *
1579 ifa_ifwithaddr(struct sockaddr *addr)
1580 {
1581
1582 return (ifa_ifwithaddr_internal(addr, 1));
1583 }
1584
1585 int
1586 ifa_ifwithaddr_check(struct sockaddr *addr)
1587 {
1588
1589 return (ifa_ifwithaddr_internal(addr, 0) != NULL);
1590 }
1591
1592 /*
1593 * Locate an interface based on the broadcast address.
1594 */
1595 /* ARGSUSED */
1596 struct ifaddr *
1597 ifa_ifwithbroadaddr(struct sockaddr *addr)
1598 {
1599 struct ifnet *ifp;
1600 struct ifaddr *ifa;
1601
1602 IFNET_RLOCK_NOSLEEP();
1603 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1604 IF_ADDR_LOCK(ifp);
1605 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1606 if (ifa->ifa_addr->sa_family != addr->sa_family)
1607 continue;
1608 if ((ifp->if_flags & IFF_BROADCAST) &&
1609 ifa->ifa_broadaddr &&
1610 ifa->ifa_broadaddr->sa_len != 0 &&
1611 sa_equal(ifa->ifa_broadaddr, addr)) {
1612 ifa_ref(ifa);
1613 IF_ADDR_UNLOCK(ifp);
1614 goto done;
1615 }
1616 }
1617 IF_ADDR_UNLOCK(ifp);
1618 }
1619 ifa = NULL;
1620 done:
1621 IFNET_RUNLOCK_NOSLEEP();
1622 return (ifa);
1623 }
1624
1625 /*
1626 * Locate the point to point interface with a given destination address.
1627 */
1628 /*ARGSUSED*/
1629 struct ifaddr *
1630 ifa_ifwithdstaddr(struct sockaddr *addr)
1631 {
1632 struct ifnet *ifp;
1633 struct ifaddr *ifa;
1634
1635 IFNET_RLOCK_NOSLEEP();
1636 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1637 if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
1638 continue;
1639 IF_ADDR_LOCK(ifp);
1640 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1641 if (ifa->ifa_addr->sa_family != addr->sa_family)
1642 continue;
1643 if (ifa->ifa_dstaddr != NULL &&
1644 sa_equal(addr, ifa->ifa_dstaddr)) {
1645 ifa_ref(ifa);
1646 IF_ADDR_UNLOCK(ifp);
1647 goto done;
1648 }
1649 }
1650 IF_ADDR_UNLOCK(ifp);
1651 }
1652 ifa = NULL;
1653 done:
1654 IFNET_RUNLOCK_NOSLEEP();
1655 return (ifa);
1656 }
1657
1658 /*
1659  * Find an interface on a specific network.  If there are many, the
1660  * most specific match is chosen.
1661 */
1662 struct ifaddr *
1663 ifa_ifwithnet(struct sockaddr *addr, int ignore_ptp)
1664 {
1665 struct ifnet *ifp;
1666 struct ifaddr *ifa;
1667 struct ifaddr *ifa_maybe = NULL;
1668 u_int af = addr->sa_family;
1669 char *addr_data = addr->sa_data, *cplim;
1670
1671 /*
1672 * AF_LINK addresses can be looked up directly by their index number,
1673 * so do that if we can.
1674 */
1675 if (af == AF_LINK) {
1676 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
1677 if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
1678 return (ifaddr_byindex(sdl->sdl_index));
1679 }
1680
1681 /*
1682 	 * Scan through each interface, looking for ones that have addresses
1683 * in this address family. Maintain a reference on ifa_maybe once
1684 * we find one, as we release the IF_ADDR_LOCK() that kept it stable
1685 * when we move onto the next interface.
1686 */
1687 IFNET_RLOCK_NOSLEEP();
1688 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1689 IF_ADDR_LOCK(ifp);
1690 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1691 char *cp, *cp2, *cp3;
1692
1693 if (ifa->ifa_addr->sa_family != af)
1694 next: continue;
1695 if (af == AF_INET &&
1696 ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) {
1697 /*
1698 * This is a bit broken as it doesn't
1699 * take into account that the remote end may
1700 * be a single node in the network we are
1701 * looking for.
1702 * The trouble is that we don't know the
1703 * netmask for the remote end.
1704 */
1705 if (ifa->ifa_dstaddr != NULL &&
1706 sa_equal(addr, ifa->ifa_dstaddr)) {
1707 ifa_ref(ifa);
1708 IF_ADDR_UNLOCK(ifp);
1709 goto done;
1710 }
1711 } else {
1712 /*
1713 * if we have a special address handler,
1714 * then use it instead of the generic one.
1715 */
1716 if (ifa->ifa_claim_addr) {
1717 if ((*ifa->ifa_claim_addr)(ifa, addr)) {
1718 ifa_ref(ifa);
1719 IF_ADDR_UNLOCK(ifp);
1720 goto done;
1721 }
1722 continue;
1723 }
1724
1725 /*
1726 * Scan all the bits in the ifa's address.
1727 				 * If a bit disagrees with what we are
1728 * looking for, mask it with the netmask
1729 * to see if it really matters.
1730 * (A byte at a time)
1731 */
1732 if (ifa->ifa_netmask == 0)
1733 continue;
1734 cp = addr_data;
1735 cp2 = ifa->ifa_addr->sa_data;
1736 cp3 = ifa->ifa_netmask->sa_data;
1737 cplim = ifa->ifa_netmask->sa_len
1738 + (char *)ifa->ifa_netmask;
1739 while (cp3 < cplim)
1740 if ((*cp++ ^ *cp2++) & *cp3++)
1741 goto next; /* next address! */
1742 /*
1743 * If the netmask of what we just found
1744 * is more specific than what we had before
1745 * (if we had one) then remember the new one
1746 * before continuing to search
1747 * for an even better one.
1748 */
1749 if (ifa_maybe == NULL ||
1750 rn_refines((caddr_t)ifa->ifa_netmask,
1751 (caddr_t)ifa_maybe->ifa_netmask)) {
1752 if (ifa_maybe != NULL)
1753 ifa_free(ifa_maybe);
1754 ifa_maybe = ifa;
1755 ifa_ref(ifa_maybe);
1756 }
1757 }
1758 }
1759 IF_ADDR_UNLOCK(ifp);
1760 }
1761 ifa = ifa_maybe;
1762 ifa_maybe = NULL;
1763 done:
1764 IFNET_RUNLOCK_NOSLEEP();
1765 if (ifa_maybe != NULL)
1766 ifa_free(ifa_maybe);
1767 return (ifa);
1768 }
1769
1770 /*
1771 * Find an interface address specific to an interface best matching
1772 * a given address.
1773 */
1774 struct ifaddr *
1775 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1776 {
1777 struct ifaddr *ifa;
1778 char *cp, *cp2, *cp3;
1779 char *cplim;
1780 struct ifaddr *ifa_maybe = NULL;
1781 u_int af = addr->sa_family;
1782
1783 if (af >= AF_MAX)
1784 return (NULL);
1785 IF_ADDR_LOCK(ifp);
1786 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1787 if (ifa->ifa_addr->sa_family != af)
1788 continue;
1789 if (ifa_maybe == NULL)
1790 ifa_maybe = ifa;
1791 if (ifa->ifa_netmask == 0) {
1792 if (sa_equal(addr, ifa->ifa_addr) ||
1793 (ifa->ifa_dstaddr &&
1794 sa_equal(addr, ifa->ifa_dstaddr)))
1795 goto done;
1796 continue;
1797 }
1798 if (ifp->if_flags & IFF_POINTOPOINT) {
1799 if (sa_equal(addr, ifa->ifa_dstaddr))
1800 goto done;
1801 } else {
1802 cp = addr->sa_data;
1803 cp2 = ifa->ifa_addr->sa_data;
1804 cp3 = ifa->ifa_netmask->sa_data;
1805 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1806 for (; cp3 < cplim; cp3++)
1807 if ((*cp++ ^ *cp2++) & *cp3)
1808 break;
1809 if (cp3 == cplim)
1810 goto done;
1811 }
1812 }
1813 ifa = ifa_maybe;
1814 done:
1815 if (ifa != NULL)
1816 ifa_ref(ifa);
1817 IF_ADDR_UNLOCK(ifp);
1818 return (ifa);
1819 }
1820
1821 #include <net/if_llatbl.h>
1822
1823 /*
1824 * Default action when installing a route with a Link Level gateway.
1825 * Lookup an appropriate real ifa to point to.
1826 * This should be moved to /sys/net/link.c eventually.
1827 */
1828 static void
1829 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1830 {
1831 struct ifaddr *ifa, *oifa;
1832 struct sockaddr *dst;
1833 struct ifnet *ifp;
1834
1835 RT_LOCK_ASSERT(rt);
1836
1837 if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) ||
1838 ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0))
1839 return;
1840 ifa = ifaof_ifpforaddr(dst, ifp);
1841 if (ifa) {
1842 oifa = rt->rt_ifa;
1843 rt->rt_ifa = ifa;
1844 ifa_free(oifa);
1845 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1846 ifa->ifa_rtrequest(cmd, rt, info);
1847 }
1848 }
1849
1850 /*
1851 * Mark an interface down and notify protocols of
1852 * the transition.
1853  * NOTE: must be called at splnet or equivalent.
1854 */
1855 static void
1856 if_unroute(struct ifnet *ifp, int flag, int fam)
1857 {
1858 struct ifaddr *ifa;
1859
1860 KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP"));
1861
1862 ifp->if_flags &= ~flag;
1863 getmicrotime(&ifp->if_lastchange);
1864 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
1865 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1866 pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1867 ifp->if_qflush(ifp);
1868
1869 if (ifp->if_carp)
1870 (*carp_linkstate_p)(ifp);
1871 rt_ifmsg(ifp);
1872 }
1873
1874 /*
1875 * Mark an interface up and notify protocols of
1876 * the transition.
1877  * NOTE: must be called at splnet or equivalent.
1878 */
1879 static void
1880 if_route(struct ifnet *ifp, int flag, int fam)
1881 {
1882 struct ifaddr *ifa;
1883
1884 KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));
1885
1886 ifp->if_flags |= flag;
1887 getmicrotime(&ifp->if_lastchange);
1888 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
1889 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1890 pfctlinput(PRC_IFUP, ifa->ifa_addr);
1891 if (ifp->if_carp)
1892 (*carp_linkstate_p)(ifp);
1893 rt_ifmsg(ifp);
1894 #ifdef INET6
1895 in6_if_up(ifp);
1896 #endif
1897 }
1898
1899 void (*vlan_link_state_p)(struct ifnet *, int); /* XXX: private from if_vlan */
1900 void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */
1901
1902 /*
1903 * Handle a change in the interface link state. To avoid LORs
1904 * between driver lock and upper layer locks, as well as possible
1905  * recursions, we post an event to a taskqueue, and all the work
1906  * is done in the static do_link_state_change().
1907 */
1908 void
1909 if_link_state_change(struct ifnet *ifp, int link_state)
1910 {
1911 /* Return if state hasn't changed. */
1912 if (ifp->if_link_state == link_state)
1913 return;
1914
1915 ifp->if_link_state = link_state;
1916
1917 taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
1918 }
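/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  A driver's media or interrupt handler reports carrier
 * transitions with calls such as:
 *
 *	if_link_state_change(ifp, LINK_STATE_UP);
 *	if_link_state_change(ifp, LINK_STATE_DOWN);
 *
 * and the heavy lifting is then deferred to do_link_state_change() below.
 */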
1919
1920 static void
1921 do_link_state_change(void *arg, int pending)
1922 {
1923 struct ifnet *ifp = (struct ifnet *)arg;
1924 int link_state = ifp->if_link_state;
1925 CURVNET_SET(ifp->if_vnet);
1926
1927 /* Notify that the link state has changed. */
1928 rt_ifmsg(ifp);
1929 if (ifp->if_vlantrunk != NULL)
1930 (*vlan_link_state_p)(ifp, 0);
1931
1932 if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
1933 IFP2AC(ifp)->ac_netgraph != NULL)
1934 (*ng_ether_link_state_p)(ifp, link_state);
1935 if (ifp->if_carp)
1936 (*carp_linkstate_p)(ifp);
1937 if (ifp->if_bridge) {
1938 KASSERT(bstp_linkstate_p != NULL,("if_bridge bstp not loaded!"));
1939 (*bstp_linkstate_p)(ifp, link_state);
1940 }
1941 if (ifp->if_lagg) {
1942 KASSERT(lagg_linkstate_p != NULL,("if_lagg not loaded!"));
1943 (*lagg_linkstate_p)(ifp, link_state);
1944 }
1945
1946 if (IS_DEFAULT_VNET(curvnet))
1947 devctl_notify("IFNET", ifp->if_xname,
1948 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN",
1949 NULL);
1950 if (pending > 1)
1951 if_printf(ifp, "%d link states coalesced\n", pending);
1952 if (log_link_state_change)
1953 log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname,
1954 (link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
1955 CURVNET_RESTORE();
1956 }
1957
1958 /*
1959 * Mark an interface down and notify protocols of
1960 * the transition.
1961  * NOTE: must be called at splnet or equivalent.
1962 */
1963 void
1964 if_down(struct ifnet *ifp)
1965 {
1966
1967 if_unroute(ifp, IFF_UP, AF_UNSPEC);
1968 }
1969
1970 /*
1971 * Mark an interface up and notify protocols of
1972 * the transition.
1973  * NOTE: must be called at splnet or equivalent.
1974 */
1975 void
1976 if_up(struct ifnet *ifp)
1977 {
1978
1979 if_route(ifp, IFF_UP, AF_UNSPEC);
1980 }
1981
1982 /*
1983 * Flush an interface queue.
1984 */
1985 void
1986 if_qflush(struct ifnet *ifp)
1987 {
1988 struct mbuf *m, *n;
1989 struct ifaltq *ifq;
1990
1991 ifq = &ifp->if_snd;
1992 IFQ_LOCK(ifq);
1993 #ifdef ALTQ
1994 if (ALTQ_IS_ENABLED(ifq))
1995 ALTQ_PURGE(ifq);
1996 #endif
1997 n = ifq->ifq_head;
1998 while ((m = n) != 0) {
1999 n = m->m_act;
2000 m_freem(m);
2001 }
2002 ifq->ifq_head = 0;
2003 ifq->ifq_tail = 0;
2004 ifq->ifq_len = 0;
2005 IFQ_UNLOCK(ifq);
2006 }
2007
2008 /*
2009 * Handle interface watchdog timer routines. Called
2010 * from softclock, we decrement timers (if set) and
2011 * call the appropriate interface routine on expiration.
2012 *
2013 * XXXRW: Note that because timeouts run with Giant, if_watchdog() is called
2014 * holding Giant.
2015 */
2016 static void
2017 if_slowtimo(void *arg)
2018 {
2019 VNET_ITERATOR_DECL(vnet_iter);
2020 struct ifnet *ifp;
2021 int s = splimp();
2022
2023 VNET_LIST_RLOCK_NOSLEEP();
2024 IFNET_RLOCK_NOSLEEP();
2025 VNET_FOREACH(vnet_iter) {
2026 CURVNET_SET(vnet_iter);
2027 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
2028 if (ifp->if_timer == 0 || --ifp->if_timer)
2029 continue;
2030 if (ifp->if_watchdog)
2031 (*ifp->if_watchdog)(ifp);
2032 }
2033 CURVNET_RESTORE();
2034 }
2035 IFNET_RUNLOCK_NOSLEEP();
2036 VNET_LIST_RUNLOCK_NOSLEEP();
2037 splx(s);
2038 timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
2039 }
2040
2041 /*
2042 * Map interface name to interface structure pointer, with or without
2043 * returning a reference.
2044 */
2045 struct ifnet *
2046 ifunit_ref(const char *name)
2047 {
2048 struct ifnet *ifp;
2049
2050 IFNET_RLOCK_NOSLEEP();
2051 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
2052 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 &&
2053 !(ifp->if_flags & IFF_DYING))
2054 break;
2055 }
2056 if (ifp != NULL)
2057 if_ref(ifp);
2058 IFNET_RUNLOCK_NOSLEEP();
2059 return (ifp);
2060 }
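/*
 * [Editor's example -- an illustrative sketch, not part of the original
 * if.c.]  Callers that may sleep or that hold the pointer across blocking
 * operations should prefer ifunit_ref() and drop the reference when done;
 * the interface name here is hypothetical:
 *
 *	ifp = ifunit_ref("foo0");
 *	if (ifp == NULL)
 *		return (ENXIO);
 *	... use ifp ...
 *	if_rele(ifp);
 */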
2061
2062 struct ifnet *
2063 ifunit(const char *name)
2064 {
2065 struct ifnet *ifp;
2066
2067 IFNET_RLOCK_NOSLEEP();
2068 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
2069 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
2070 break;
2071 }
2072 IFNET_RUNLOCK_NOSLEEP();
2073 return (ifp);
2074 }
2075
2076 /*
2077 * Hardware specific interface ioctls.
2078 */
2079 static int
2080 ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
2081 {
2082 struct ifreq *ifr;
2083 struct ifstat *ifs;
2084 int error = 0;
2085 int new_flags, temp_flags;
2086 size_t namelen, onamelen;
2087 size_t descrlen;
2088 char *descrbuf, *odescrbuf;
2089 char new_name[IFNAMSIZ];
2090 struct ifaddr *ifa;
2091 struct sockaddr_dl *sdl;
2092
2093 ifr = (struct ifreq *)data;
2094 switch (cmd) {
2095 case SIOCGIFINDEX:
2096 ifr->ifr_index = ifp->if_index;
2097 break;
2098
2099 case SIOCGIFFLAGS:
2100 temp_flags = ifp->if_flags | ifp->if_drv_flags;
2101 ifr->ifr_flags = temp_flags & 0xffff;
2102 ifr->ifr_flagshigh = temp_flags >> 16;
2103 break;
2104
2105 case SIOCGIFCAP:
2106 ifr->ifr_reqcap = ifp->if_capabilities;
2107 ifr->ifr_curcap = ifp->if_capenable;
2108 break;
2109
2110 #ifdef MAC
2111 case SIOCGIFMAC:
2112 error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp);
2113 break;
2114 #endif
2115
2116 case SIOCGIFMETRIC:
2117 ifr->ifr_metric = ifp->if_metric;
2118 break;
2119
2120 case SIOCGIFMTU:
2121 ifr->ifr_mtu = ifp->if_mtu;
2122 break;
2123
2124 case SIOCGIFPHYS:
2125 ifr->ifr_phys = ifp->if_physical;
2126 break;
2127
2128 case SIOCGIFDESCR:
2129 error = 0;
2130 sx_slock(&ifdescr_sx);
2131 if (ifp->if_description == NULL)
2132 error = ENOMSG;
2133 else {
2134 /* space for terminating nul */
2135 descrlen = strlen(ifp->if_description) + 1;
2136 if (ifr->ifr_buffer.length < descrlen)
2137 ifr->ifr_buffer.buffer = NULL;
2138 else
2139 error = copyout(ifp->if_description,
2140 ifr->ifr_buffer.buffer, descrlen);
2141 ifr->ifr_buffer.length = descrlen;
2142 }
2143 sx_sunlock(&ifdescr_sx);
2144 break;
2145
2146 case SIOCSIFDESCR:
2147 error = priv_check(td, PRIV_NET_SETIFDESCR);
2148 if (error)
2149 return (error);
2150
2151 /*
2152 * Copy only (length-1) bytes to make sure that
2153 * if_description is always nul terminated. The
2154 * length parameter is supposed to include the
2155 * terminating nul.
2156 */
2157 if (ifr->ifr_buffer.length > ifdescr_maxlen)
2158 return (ENAMETOOLONG);
2159 else if (ifr->ifr_buffer.length == 0)
2160 descrbuf = NULL;
2161 else {
2162 descrbuf = malloc(ifr->ifr_buffer.length, M_IFDESCR,
2163 M_WAITOK | M_ZERO);
2164 error = copyin(ifr->ifr_buffer.buffer, descrbuf,
2165 ifr->ifr_buffer.length - 1);
2166 if (error) {
2167 free(descrbuf, M_IFDESCR);
2168 break;
2169 }
2170 }
2171
2172 sx_xlock(&ifdescr_sx);
2173 odescrbuf = ifp->if_description;
2174 ifp->if_description = descrbuf;
2175 sx_xunlock(&ifdescr_sx);
2176
2177 getmicrotime(&ifp->if_lastchange);
2178 free(odescrbuf, M_IFDESCR);
2179 break;
2180
2181 case SIOCSIFFLAGS:
2182 error = priv_check(td, PRIV_NET_SETIFFLAGS);
2183 if (error)
2184 return (error);
2185 /*
2186 * Currently, no driver owned flags pass the IFF_CANTCHANGE
2187 * check, so we don't need special handling here yet.
2188 */
2189 new_flags = (ifr->ifr_flags & 0xffff) |
2190 (ifr->ifr_flagshigh << 16);
2191 if (ifp->if_flags & IFF_SMART) {
2192 /* Smart drivers twiddle their own routes */
2193 } else if (ifp->if_flags & IFF_UP &&
2194 (new_flags & IFF_UP) == 0) {
2195 int s = splimp();
2196 if_down(ifp);
2197 splx(s);
2198 } else if (new_flags & IFF_UP &&
2199 (ifp->if_flags & IFF_UP) == 0) {
2200 int s = splimp();
2201 if_up(ifp);
2202 splx(s);
2203 }
2204 /* See if permanently promiscuous mode bit is about to flip */
2205 if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
2206 if (new_flags & IFF_PPROMISC)
2207 ifp->if_flags |= IFF_PROMISC;
2208 else if (ifp->if_pcount == 0)
2209 ifp->if_flags &= ~IFF_PROMISC;
2210 log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
2211 ifp->if_xname,
2212 (new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
2213 }
2214 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
2215 (new_flags &~ IFF_CANTCHANGE);
2216 if (ifp->if_ioctl) {
2217 (void) (*ifp->if_ioctl)(ifp, cmd, data);
2218 }
2219 getmicrotime(&ifp->if_lastchange);
2220 break;
2221
2222 case SIOCSIFCAP:
2223 error = priv_check(td, PRIV_NET_SETIFCAP);
2224 if (error)
2225 return (error);
2226 if (ifp->if_ioctl == NULL)
2227 return (EOPNOTSUPP);
2228 if (ifr->ifr_reqcap & ~ifp->if_capabilities)
2229 return (EINVAL);
2230 error = (*ifp->if_ioctl)(ifp, cmd, data);
2231 if (error == 0)
2232 getmicrotime(&ifp->if_lastchange);
2233 break;
2234
2235 #ifdef MAC
2236 case SIOCSIFMAC:
2237 error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp);
2238 break;
2239 #endif
2240
2241 case SIOCSIFNAME:
2242 error = priv_check(td, PRIV_NET_SETIFNAME);
2243 if (error)
2244 return (error);
2245 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
2246 if (error != 0)
2247 return (error);
2248 if (new_name[0] == '\0')
2249 return (EINVAL);
2250 if (ifunit(new_name) != NULL)
2251 return (EEXIST);
2252
2253 /*
2254 * XXX: Locking. Nothing else seems to lock if_flags,
2255 * and there are numerous other races with the
2256 * ifunit() checks not being atomic with namespace
2257 * changes (renames, vmoves, if_attach, etc).
2258 */
2259 ifp->if_flags |= IFF_RENAMING;
2260
2261 /* Announce the departure of the interface. */
2262 rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
2263 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
2264
2265 log(LOG_INFO, "%s: changing name to '%s'\n",
2266 ifp->if_xname, new_name);
2267
2268 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
2269 ifa = ifp->if_addr;
2270 IFA_LOCK(ifa);
2271 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
2272 namelen = strlen(new_name);
2273 onamelen = sdl->sdl_nlen;
2274 /*
2275 * Move the address if needed. This is safe because we
2276 * allocate space for a name of length IFNAMSIZ when we
2277 * create this in if_attach().
2278 */
2279 if (namelen != onamelen) {
2280 bcopy(sdl->sdl_data + onamelen,
2281 sdl->sdl_data + namelen, sdl->sdl_alen);
2282 }
2283 bcopy(new_name, sdl->sdl_data, namelen);
2284 sdl->sdl_nlen = namelen;
2285 sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
2286 bzero(sdl->sdl_data, onamelen);
2287 while (namelen != 0)
2288 sdl->sdl_data[--namelen] = 0xff;
2289 IFA_UNLOCK(ifa);
2290
2291 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
2292 /* Announce the return of the interface. */
2293 rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
2294
2295 ifp->if_flags &= ~IFF_RENAMING;
2296 break;
2297
2298 #ifdef VIMAGE
2299 case SIOCSIFVNET:
2300 error = priv_check(td, PRIV_NET_SETIFVNET);
2301 if (error)
2302 return (error);
2303 error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid);
2304 break;
2305 #endif
2306
2307 case SIOCSIFMETRIC:
2308 error = priv_check(td, PRIV_NET_SETIFMETRIC);
2309 if (error)
2310 return (error);
2311 ifp->if_metric = ifr->ifr_metric;
2312 getmicrotime(&ifp->if_lastchange);
2313 break;
2314
2315 case SIOCSIFPHYS:
2316 error = priv_check(td, PRIV_NET_SETIFPHYS);
2317 if (error)
2318 return (error);
2319 if (ifp->if_ioctl == NULL)
2320 return (EOPNOTSUPP);
2321 error = (*ifp->if_ioctl)(ifp, cmd, data);
2322 if (error == 0)
2323 getmicrotime(&ifp->if_lastchange);
2324 break;
2325
2326 case SIOCSIFMTU:
2327 {
2328 u_long oldmtu = ifp->if_mtu;
2329
2330 error = priv_check(td, PRIV_NET_SETIFMTU);
2331 if (error)
2332 return (error);
2333 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
2334 return (EINVAL);
2335 if (ifp->if_ioctl == NULL)
2336 return (EOPNOTSUPP);
2337 error = (*ifp->if_ioctl)(ifp, cmd, data);
2338 if (error == 0) {
2339 getmicrotime(&ifp->if_lastchange);
2340 rt_ifmsg(ifp);
2341 }
2342 /*
2343 * If the link MTU changed, run the network layer specific procedures.
2344 */
2345 if (ifp->if_mtu != oldmtu) {
2346 #ifdef INET6
2347 nd6_setmtu(ifp);
2348 #endif
2349 }
2350 break;
2351 }
2352
2353 case SIOCADDMULTI:
2354 case SIOCDELMULTI:
2355 if (cmd == SIOCADDMULTI)
2356 error = priv_check(td, PRIV_NET_ADDMULTI);
2357 else
2358 error = priv_check(td, PRIV_NET_DELMULTI);
2359 if (error)
2360 return (error);
2361
2362 /* Don't allow group membership on non-multicast interfaces. */
2363 if ((ifp->if_flags & IFF_MULTICAST) == 0)
2364 return (EOPNOTSUPP);
2365
2366 /* Don't let users screw up protocols' entries. */
2367 if (ifr->ifr_addr.sa_family != AF_LINK)
2368 return (EINVAL);
2369
2370 if (cmd == SIOCADDMULTI) {
2371 struct ifmultiaddr *ifma;
2372
2373 /*
2374 * Userland is only permitted to join groups once
2375 * via the if_addmulti() KPI, because it cannot hold
2376 * struct ifmultiaddr * between calls. It may also
2377 * lose a race while we check if the membership
2378 * already exists.
2379 */
2380 IF_ADDR_LOCK(ifp);
2381 ifma = if_findmulti(ifp, &ifr->ifr_addr);
2382 IF_ADDR_UNLOCK(ifp);
2383 if (ifma != NULL)
2384 error = EADDRINUSE;
2385 else
2386 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
2387 } else {
2388 error = if_delmulti(ifp, &ifr->ifr_addr);
2389 }
2390 if (error == 0)
2391 getmicrotime(&ifp->if_lastchange);
2392 break;
2393
2394 case SIOCSIFPHYADDR:
2395 case SIOCDIFPHYADDR:
2396 #ifdef INET6
2397 case SIOCSIFPHYADDR_IN6:
2398 #endif
2399 case SIOCSLIFPHYADDR:
2400 case SIOCSIFMEDIA:
2401 case SIOCSIFGENERIC:
2402 error = priv_check(td, PRIV_NET_HWIOCTL);
2403 if (error)
2404 return (error);
2405 if (ifp->if_ioctl == NULL)
2406 return (EOPNOTSUPP);
2407 error = (*ifp->if_ioctl)(ifp, cmd, data);
2408 if (error == 0)
2409 getmicrotime(&ifp->if_lastchange);
2410 break;
2411
2412 case SIOCGIFSTATUS:
2413 ifs = (struct ifstat *)data;
2414 ifs->ascii[0] = '\0';
2415 /* FALLTHROUGH: let the driver ioctl below fill in ifs->ascii */
2416 case SIOCGIFPSRCADDR:
2417 case SIOCGIFPDSTADDR:
2418 case SIOCGLIFPHYADDR:
2419 case SIOCGIFMEDIA:
2420 case SIOCGIFGENERIC:
2421 if (ifp->if_ioctl == NULL)
2422 return (EOPNOTSUPP);
2423 error = (*ifp->if_ioctl)(ifp, cmd, data);
2424 break;
2425
2426 case SIOCSIFLLADDR:
2427 error = priv_check(td, PRIV_NET_SETLLADDR);
2428 if (error)
2429 return (error);
2430 error = if_setlladdr(ifp,
2431 ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
2432 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
2433 break;
2434
2435 case SIOCAIFGROUP:
2436 {
2437 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
2438
2439 error = priv_check(td, PRIV_NET_ADDIFGROUP);
2440 if (error)
2441 return (error);
2442 if ((error = if_addgroup(ifp, ifgr->ifgr_group)))
2443 return (error);
2444 break;
2445 }
2446
2447 case SIOCGIFGROUP:
2448 if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp)))
2449 return (error);
2450 break;
2451
2452 case SIOCDIFGROUP:
2453 {
2454 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
2455
2456 error = priv_check(td, PRIV_NET_DELIFGROUP);
2457 if (error)
2458 return (error);
2459 if ((error = if_delgroup(ifp, ifgr->ifgr_group)))
2460 return (error);
2461 break;
2462 }
2463
2464 default:
2465 error = ENOIOCTL;
2466 break;
2467 }
2468 return (error);
2469 }
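
/*
 * [Editorial sketch, not part of if.c] Userland view of the SIOCGIFFLAGS
 * handler above, reassembling the 32-bit flag word from ifr_flags and
 * ifr_flagshigh.  get_if_flags() is hypothetical and error handling is
 * minimal.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
get_if_flags(const char *name, int *flagsp)
{
	struct ifreq ifr;
	int s, error;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	error = ioctl(s, SIOCGIFFLAGS, &ifr);
	if (error == 0)
		*flagsp = (ifr.ifr_flags & 0xffff) |
		    (ifr.ifr_flagshigh << 16);
	close(s);
	return (error);
}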
2470
2471 #ifdef COMPAT_FREEBSD32
2472 struct ifconf32 {
2473 int32_t ifc_len;
2474 union {
2475 uint32_t ifcu_buf;
2476 uint32_t ifcu_req;
2477 } ifc_ifcu;
2478 };
2479 #define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32)
2480 #endif
2481
2482 /*
2483 * Interface ioctls.
2484 */
2485 int
2486 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
2487 {
2488 struct ifnet *ifp;
2489 struct ifreq *ifr;
2490 int error;
2491 int oif_flags;
2492
2493 switch (cmd) {
2494 case SIOCGIFCONF:
2495 case OSIOCGIFCONF:
2496 return (ifconf(cmd, data));
2497
2498 #ifdef COMPAT_FREEBSD32
2499 case SIOCGIFCONF32:
2500 {
2501 struct ifconf32 *ifc32;
2502 struct ifconf ifc;
2503
2504 ifc32 = (struct ifconf32 *)data;
2505 ifc.ifc_len = ifc32->ifc_len;
2506 ifc.ifc_buf = PTRIN(ifc32->ifc_buf);
2507
2508 return (ifconf(SIOCGIFCONF, (void *)&ifc));
2509 }
2510 #endif
2511 }
2512 ifr = (struct ifreq *)data;
2513
2514 switch (cmd) {
2515 #ifdef VIMAGE
2516 case SIOCSIFRVNET:
2517 error = priv_check(td, PRIV_NET_SETIFVNET);
2518 if (error)
2519 return (error);
2520 return (if_vmove_reclaim(td, ifr->ifr_name, ifr->ifr_jid));
2521 #endif
2522 case SIOCIFCREATE:
2523 case SIOCIFCREATE2:
2524 error = priv_check(td, PRIV_NET_IFCREATE);
2525 if (error)
2526 return (error);
2527 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
2528 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
2529 case SIOCIFDESTROY:
2530 error = priv_check(td, PRIV_NET_IFDESTROY);
2531 if (error)
2532 return (error);
2533 return if_clone_destroy(ifr->ifr_name);
2534
2535 case SIOCIFGCLONERS:
2536 return (if_clone_list((struct if_clonereq *)data));
2537 case SIOCGIFGMEMB:
2538 return (if_getgroupmembers((struct ifgroupreq *)data));
2539 }
2540
2541 ifp = ifunit_ref(ifr->ifr_name);
2542 if (ifp == NULL)
2543 return (ENXIO);
2544
2545 error = ifhwioctl(cmd, ifp, data, td);
2546 if (error != ENOIOCTL) {
2547 if_rele(ifp);
2548 return (error);
2549 }
2550
2551 oif_flags = ifp->if_flags;
2552 if (so->so_proto == NULL) {
2553 if_rele(ifp);
2554 return (EOPNOTSUPP);
2555 }
2556 #ifndef COMPAT_43
2557 error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
2558 data,
2559 ifp, td));
2560 if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL)
2561 error = (*ifp->if_ioctl)(ifp, cmd, data);
2562 #else
2563 {
2564 u_long ocmd = cmd;
2565
2566 switch (cmd) {
2567
2568 case SIOCSIFDSTADDR:
2569 case SIOCSIFADDR:
2570 case SIOCSIFBRDADDR:
2571 case SIOCSIFNETMASK:
2572 #if BYTE_ORDER != BIG_ENDIAN
2573 if (ifr->ifr_addr.sa_family == 0 &&
2574 ifr->ifr_addr.sa_len < 16) {
2575 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
2576 ifr->ifr_addr.sa_len = 16;
2577 }
2578 #else
2579 if (ifr->ifr_addr.sa_len == 0)
2580 ifr->ifr_addr.sa_len = 16;
2581 #endif
2582 break;
2583
2584 case OSIOCGIFADDR:
2585 cmd = SIOCGIFADDR;
2586 break;
2587
2588 case OSIOCGIFDSTADDR:
2589 cmd = SIOCGIFDSTADDR;
2590 break;
2591
2592 case OSIOCGIFBRDADDR:
2593 cmd = SIOCGIFBRDADDR;
2594 break;
2595
2596 case OSIOCGIFNETMASK:
2597 cmd = SIOCGIFNETMASK;
2598 }
2599 error = ((*so->so_proto->pr_usrreqs->pru_control)(so,
2600 cmd,
2601 data,
2602 ifp, td));
2603 if (error == EOPNOTSUPP && ifp != NULL &&
2604 ifp->if_ioctl != NULL)
2605 error = (*ifp->if_ioctl)(ifp, cmd, data);
2606 switch (ocmd) {
2607
2608 case OSIOCGIFADDR:
2609 case OSIOCGIFDSTADDR:
2610 case OSIOCGIFBRDADDR:
2611 case OSIOCGIFNETMASK:
2612 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
2613
2614 }
2615 }
2616 #endif /* COMPAT_43 */
2617
2618 if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
2619 #ifdef INET6
2620 if (ifp->if_flags & IFF_UP) {
2621 int s = splimp();
2622 in6_if_up(ifp);
2623 splx(s);
2624 }
2625 #endif
2626 }
2627 if_rele(ifp);
2628 return (error);
2629 }
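
/*
 * [Editorial sketch, not part of if.c] Userland side of the cloning requests
 * dispatched early in ifioctl() above.  On success the kernel writes the name
 * actually assigned (e.g. "tap3") back into ifr_name.  clone_interface() is
 * hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int
clone_interface(int s, char *name, size_t namelen)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCIFCREATE, &ifr) == -1)
		return (-1);
	strlcpy(name, ifr.ifr_name, namelen);	/* report assigned unit */
	return (0);
}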
2630
2631 /*
2632 * The code common to handling reference counted flags,
2633 * e.g., in ifpromisc() and if_allmulti().
2634 * The "pflag" argument can specify a permanent mode flag to check,
2635 * such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
2636 *
2637 * Only to be used on stack-owned flags, not driver-owned flags.
2638 */
2639 static int
2640 if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
2641 {
2642 struct ifreq ifr;
2643 int error;
2644 int oldflags, oldcount;
2645
2646 /* Sanity checks to catch programming errors */
2647 KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
2648 ("%s: setting driver-owned flag %d", __func__, flag));
2649
2650 if (onswitch)
2651 KASSERT(*refcount >= 0,
2652 ("%s: increment negative refcount %d for flag %d",
2653 __func__, *refcount, flag));
2654 else
2655 KASSERT(*refcount > 0,
2656 ("%s: decrement non-positive refcount %d for flag %d",
2657 __func__, *refcount, flag));
2658
2659 /* In case this mode is permanent, just touch refcount */
2660 if (ifp->if_flags & pflag) {
2661 *refcount += onswitch ? 1 : -1;
2662 return (0);
2663 }
2664
2665 /* Save ifnet parameters in case if_ioctl() fails */
2666 oldcount = *refcount;
2667 oldflags = ifp->if_flags;
2668
2669 /*
2670 * If we are not the only consumer, touching the refcount is enough.
2671 * Actually toggle the interface flag only if we are the first or last.
2672 */
2673 if (onswitch) {
2674 if ((*refcount)++)
2675 return (0);
2676 ifp->if_flags |= flag;
2677 } else {
2678 if (--(*refcount))
2679 return (0);
2680 ifp->if_flags &= ~flag;
2681 }
2682
2683 /* Call down the driver since we've changed interface flags */
2684 if (ifp->if_ioctl == NULL) {
2685 error = EOPNOTSUPP;
2686 goto recover;
2687 }
2688 ifr.ifr_flags = ifp->if_flags & 0xffff;
2689 ifr.ifr_flagshigh = ifp->if_flags >> 16;
2690 error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
2691 if (error)
2692 goto recover;
2693 /* Notify userland that interface flags have changed */
2694 rt_ifmsg(ifp);
2695 return (0);
2696
2697 recover:
2698 /* Recover after driver error */
2699 *refcount = oldcount;
2700 ifp->if_flags = oldflags;
2701 return (error);
2702 }
2703
2704 /*
2705 * Set/clear promiscuous mode on interface ifp based on the truth value
2706 * of pswitch. The calls are reference counted so that only the first
2707 * "on" request actually has an effect, as does the final "off" request.
2708 * Results are undefined if the "off" and "on" requests are not matched.
2709 */
2710 int
2711 ifpromisc(struct ifnet *ifp, int pswitch)
2712 {
2713 int error;
2714 int oldflags = ifp->if_flags;
2715
2716 error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
2717 &ifp->if_pcount, pswitch);
2718 /* If promiscuous mode status has changed, log a message */
2719 if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC))
2720 log(LOG_INFO, "%s: promiscuous mode %s\n",
2721 ifp->if_xname,
2722 (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
2723 return (error);
2724 }
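
/*
 * [Editorial sketch, not part of if.c] The reference-counted contract of
 * ifpromisc() above: every successful "on" call must eventually be matched by
 * an "off" call (this is how, e.g., multiple bpf listeners stack on one
 * interface).  The example_* helpers are hypothetical.
 */
static int
example_sniff_start(struct ifnet *ifp)
{

	return (ifpromisc(ifp, 1));	/* first caller sets IFF_PROMISC */
}

static void
example_sniff_stop(struct ifnet *ifp)
{

	(void)ifpromisc(ifp, 0);	/* last caller clears IFF_PROMISC */
}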
2725
2726 /*
2727 * Return interface configuration
2728 * of system. List may be used
2729 * in later ioctl's (above) to get
2730 * other information.
2731 */
2732 /*ARGSUSED*/
2733 static int
2734 ifconf(u_long cmd, caddr_t data)
2735 {
2736 struct ifconf *ifc = (struct ifconf *)data;
2737 struct ifnet *ifp;
2738 struct ifaddr *ifa;
2739 struct ifreq ifr;
2740 struct sbuf *sb;
2741 int error, full = 0, valid_len, max_len;
2742
2743 /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
2744 max_len = MAXPHYS - 1;
2745
2746 /* Prevent hostile input from being able to crash the system */
2747 if (ifc->ifc_len <= 0)
2748 return (EINVAL);
2749
2750 again:
2751 if (ifc->ifc_len <= max_len) {
2752 max_len = ifc->ifc_len;
2753 full = 1;
2754 }
2755 sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
2756 max_len = 0;
2757 valid_len = 0;
2758
2759 IFNET_RLOCK();
2760 TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
2761 int addrs;
2762
2763 /*
2764 * Zero the ifr_name buffer to make sure we don't
2765 * disclose the contents of the stack.
2766 */
2767 memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name));
2768
2769 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
2770 >= sizeof(ifr.ifr_name)) {
2771 sbuf_delete(sb);
2772 IFNET_RUNLOCK();
2773 return (ENAMETOOLONG);
2774 }
2775
2776 addrs = 0;
2777 IF_ADDR_LOCK(ifp);
2778 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
2779 struct sockaddr *sa = ifa->ifa_addr;
2780
2781 if (prison_if(curthread->td_ucred, sa) != 0)
2782 continue;
2783 addrs++;
2784 #ifdef COMPAT_43
2785 if (cmd == OSIOCGIFCONF) {
2786 struct osockaddr *osa =
2787 (struct osockaddr *)&ifr.ifr_addr;
2788 ifr.ifr_addr = *sa;
2789 osa->sa_family = sa->sa_family;
2790 sbuf_bcat(sb, &ifr, sizeof(ifr));
2791 max_len += sizeof(ifr);
2792 } else
2793 #endif
2794 if (sa->sa_len <= sizeof(*sa)) {
2795 ifr.ifr_addr = *sa;
2796 sbuf_bcat(sb, &ifr, sizeof(ifr));
2797 max_len += sizeof(ifr);
2798 } else {
2799 sbuf_bcat(sb, &ifr,
2800 offsetof(struct ifreq, ifr_addr));
2801 max_len += offsetof(struct ifreq, ifr_addr);
2802 sbuf_bcat(sb, sa, sa->sa_len);
2803 max_len += sa->sa_len;
2804 }
2805
2806 if (!sbuf_overflowed(sb))
2807 valid_len = sbuf_len(sb);
2808 }
2809 IF_ADDR_UNLOCK(ifp);
2810 if (addrs == 0) {
2811 bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
2812 sbuf_bcat(sb, &ifr, sizeof(ifr));
2813 max_len += sizeof(ifr);
2814
2815 if (!sbuf_overflowed(sb))
2816 valid_len = sbuf_len(sb);
2817 }
2818 }
2819 IFNET_RUNLOCK();
2820
2821 /*
2822 * If we didn't allocate enough space (uncommon), try again. If
2823 * we have already allocated as much space as we are allowed,
2824 * return what we've got.
2825 */
2826 if (valid_len != max_len && !full) {
2827 sbuf_delete(sb);
2828 goto again;
2829 }
2830
2831 ifc->ifc_len = valid_len;
2832 sbuf_finish(sb);
2833 error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
2834 sbuf_delete(sb);
2835 return (error);
2836 }
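
/*
 * [Editorial sketch, not part of if.c] Consuming the buffer produced by
 * ifconf() above from userland.  Records are variable length, so advance by
 * the name field plus max(sizeof(struct sockaddr), sa_len), matching the sbuf
 * layout built above.  list_interfaces() is hypothetical and error handling
 * is abbreviated.
 */
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void
list_interfaces(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	char *buf, *p;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	buf = malloc(64 * 1024);
	if (s == -1 || buf == NULL)
		goto out;
	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = 64 * 1024;
	ifc.ifc_buf = buf;
	if (ioctl(s, SIOCGIFCONF, &ifc) == -1)
		goto out;
	for (p = buf; p < buf + ifc.ifc_len;) {
		ifr = (struct ifreq *)p;
		printf("%s (af %d)\n", ifr->ifr_name,
		    ifr->ifr_addr.sa_family);
		p += sizeof(ifr->ifr_name) +
		    MAX(sizeof(struct sockaddr), ifr->ifr_addr.sa_len);
	}
out:
	free(buf);
	if (s != -1)
		close(s);
}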
2837
2838 /*
2839 * Just like ifpromisc(), but for all-multicast-reception mode.
2840 */
2841 int
2842 if_allmulti(struct ifnet *ifp, int onswitch)
2843 {
2844
2845 return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
2846 }
2847
2848 struct ifmultiaddr *
2849 if_findmulti(struct ifnet *ifp, struct sockaddr *sa)
2850 {
2851 struct ifmultiaddr *ifma;
2852
2853 IF_ADDR_LOCK_ASSERT(ifp);
2854
2855 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2856 if (sa->sa_family == AF_LINK) {
2857 if (sa_dl_equal(ifma->ifma_addr, sa))
2858 break;
2859 } else {
2860 if (sa_equal(ifma->ifma_addr, sa))
2861 break;
2862 }
2863 }
2864
2865 return ifma;
2866 }
2867
2868 /*
2869 * Allocate a new ifmultiaddr and initialize based on passed arguments. We
2870 * make copies of passed sockaddrs. The ifmultiaddr will not be added to
2871 * the ifnet multicast address list here, so the caller must do that and
2872 * other setup work (such as notifying the device driver). The reference
2873 * count is initialized to 1.
2874 */
2875 static struct ifmultiaddr *
2876 if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
2877 int mflags)
2878 {
2879 struct ifmultiaddr *ifma;
2880 struct sockaddr *dupsa;
2881
2882 ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
2883 M_ZERO);
2884 if (ifma == NULL)
2885 return (NULL);
2886
2887 dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
2888 if (dupsa == NULL) {
2889 free(ifma, M_IFMADDR);
2890 return (NULL);
2891 }
2892 bcopy(sa, dupsa, sa->sa_len);
2893 ifma->ifma_addr = dupsa;
2894
2895 ifma->ifma_ifp = ifp;
2896 ifma->ifma_refcount = 1;
2897 ifma->ifma_protospec = NULL;
2898
2899 if (llsa == NULL) {
2900 ifma->ifma_lladdr = NULL;
2901 return (ifma);
2902 }
2903
2904 dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
2905 if (dupsa == NULL) {
2906 free(ifma->ifma_addr, M_IFMADDR);
2907 free(ifma, M_IFMADDR);
2908 return (NULL);
2909 }
2910 bcopy(llsa, dupsa, llsa->sa_len);
2911 ifma->ifma_lladdr = dupsa;
2912
2913 return (ifma);
2914 }
2915
2916 /*
2917 * if_freemulti: free ifmultiaddr structure and possibly attached related
2918 * addresses. The caller is responsible for implementing reference
2919 * counting, notifying the driver, handling routing messages, and releasing
2920 * any dependent link layer state.
2921 */
2922 static void
2923 if_freemulti(struct ifmultiaddr *ifma)
2924 {
2925
2926 KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
2927 ifma->ifma_refcount));
2928 KASSERT(ifma->ifma_protospec == NULL,
2929 ("if_freemulti: protospec not NULL"));
2930
2931 if (ifma->ifma_lladdr != NULL)
2932 free(ifma->ifma_lladdr, M_IFMADDR);
2933 free(ifma->ifma_addr, M_IFMADDR);
2934 free(ifma, M_IFMADDR);
2935 }
2936
2937 /*
2938 * Register an additional multicast address with a network interface.
2939 *
2940 * - If the address is already present, bump the reference count on the
2941 * address and return.
2942 * - If the address is not link-layer, look up a link layer address.
2943 * - Allocate address structures for one or both addresses, and attach to the
2944 * multicast address list on the interface. If automatically adding a link
2945 * layer address, the protocol address will own a reference to the link
2946 * layer address, to be freed when it is freed.
2947 * - Notify the network device driver of an addition to the multicast address
2948 * list.
2949 *
2950 * 'sa' points to caller-owned memory with the desired multicast address.
2951 *
2952 * 'retifma' will be used to return a pointer to the resulting multicast
2953 * address reference, if desired.
2954 */
2955 int
2956 if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
2957 struct ifmultiaddr **retifma)
2958 {
2959 struct ifmultiaddr *ifma, *ll_ifma;
2960 struct sockaddr *llsa;
2961 int error;
2962
2963 /*
2964 * If the address is already present, return a new reference to it;
2965 * otherwise, allocate storage and set up a new address.
2966 */
2967 IF_ADDR_LOCK(ifp);
2968 ifma = if_findmulti(ifp, sa);
2969 if (ifma != NULL) {
2970 ifma->ifma_refcount++;
2971 if (retifma != NULL)
2972 *retifma = ifma;
2973 IF_ADDR_UNLOCK(ifp);
2974 return (0);
2975 }
2976
2977 /*
2978 * The address isn't already present; resolve the protocol address
2979 * into a link layer address, and then look that up, bump its
2980 * refcount or allocate an ifma for that also. If 'llsa' was
2981 * returned, we will need to free it later.
2982 */
2983 llsa = NULL;
2984 ll_ifma = NULL;
2985 if (ifp->if_resolvemulti != NULL) {
2986 error = ifp->if_resolvemulti(ifp, &llsa, sa);
2987 if (error)
2988 goto unlock_out;
2989 }
2990
2991 /*
2992 * Allocate the new address. Don't hook it up yet, as we may also
2993 * need to allocate a link layer multicast address.
2994 */
2995 ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
2996 if (ifma == NULL) {
2997 error = ENOMEM;
2998 goto free_llsa_out;
2999 }
3000
3001 /*
3002 * If a link layer address is found, we'll need to see if it's
3003 * already present in the address list, or allocate it as well.
3004 * When this block finishes, the link layer address will be on the
3005 * list.
3006 */
3007 if (llsa != NULL) {
3008 ll_ifma = if_findmulti(ifp, llsa);
3009 if (ll_ifma == NULL) {
3010 ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
3011 if (ll_ifma == NULL) {
3012 --ifma->ifma_refcount;
3013 if_freemulti(ifma);
3014 error = ENOMEM;
3015 goto free_llsa_out;
3016 }
3017 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
3018 ifma_link);
3019 } else
3020 ll_ifma->ifma_refcount++;
3021 ifma->ifma_llifma = ll_ifma;
3022 }
3023
3024 /*
3025 * We now have a new multicast address, ifma, and possibly a new or
3026 * referenced link layer address. Add the primary address to the
3027 * ifnet address list.
3028 */
3029 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
3030
3031 if (retifma != NULL)
3032 *retifma = ifma;
3033
3034 /*
3035 * Must generate the message while holding the lock so that 'ifma'
3036 * pointer is still valid.
3037 */
3038 rt_newmaddrmsg(RTM_NEWMADDR, ifma);
3039 IF_ADDR_UNLOCK(ifp);
3040
3041 /*
3042 * We are certain we have added something, so call down to the
3043 * interface to let them know about it.
3044 */
3045 if (ifp->if_ioctl != NULL) {
3046 (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
3047 }
3048
3049 if (llsa != NULL)
3050 free(llsa, M_IFMADDR);
3051
3052 return (0);
3053
3054 free_llsa_out:
3055 if (llsa != NULL)
3056 free(llsa, M_IFMADDR);
3057
3058 unlock_out:
3059 IF_ADDR_UNLOCK(ifp);
3060 return (error);
3061 }
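
/*
 * [Editorial sketch, not part of if.c] Joining a link-layer multicast group
 * through the KPI documented above, as a kernel consumer might.  The group
 * address and example_join_group() are illustrative; the matching leave would
 * be if_delmulti(ifp, (struct sockaddr *)&sdl).  Assumes this file's headers.
 */
static int
example_join_group(struct ifnet *ifp, const u_char group[6])
{
	struct sockaddr_dl sdl;
	struct ifmultiaddr *ifma;

	bzero(&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_index = ifp->if_index;
	sdl.sdl_type = ifp->if_type;
	sdl.sdl_alen = 6;			/* ETHER_ADDR_LEN */
	bcopy(group, LLADDR(&sdl), 6);
	return (if_addmulti(ifp, (struct sockaddr *)&sdl, &ifma));
}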
3062
3063 /*
3064 * Delete a multicast group membership by network-layer group address.
3065 *
3066 * Returns ENOENT if the entry could not be found. If ifp no longer
3067 * exists, results are undefined. This entry point should only be used
3068 * from subsystems which do appropriate locking to hold ifp for the
3069 * duration of the call.
3070 * Network-layer protocol domains must use if_delmulti_ifma().
3071 */
3072 int
3073 if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
3074 {
3075 struct ifmultiaddr *ifma;
3076 int lastref;
3077 #ifdef INVARIANTS
3078 struct ifnet *oifp;
3079
3080 IFNET_RLOCK_NOSLEEP();
3081 TAILQ_FOREACH(oifp, &V_ifnet, if_link)
3082 if (ifp == oifp)
3083 break;
3084 if (ifp != oifp)
3085 ifp = NULL;
3086 IFNET_RUNLOCK_NOSLEEP();
3087
3088 KASSERT(ifp != NULL, ("%s: ifnet went away", __func__));
3089 #endif
3090 if (ifp == NULL)
3091 return (ENOENT);
3092
3093 IF_ADDR_LOCK(ifp);
3094 lastref = 0;
3095 ifma = if_findmulti(ifp, sa);
3096 if (ifma != NULL)
3097 lastref = if_delmulti_locked(ifp, ifma, 0);
3098 IF_ADDR_UNLOCK(ifp);
3099
3100 if (ifma == NULL)
3101 return (ENOENT);
3102
3103 if (lastref && ifp->if_ioctl != NULL) {
3104 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
3105 }
3106
3107 return (0);
3108 }
3109
3110 /*
3111 * Delete all multicast group membership for an interface.
3112 * Should be used to quickly flush all multicast filters.
3113 */
3114 void
3115 if_delallmulti(struct ifnet *ifp)
3116 {
3117 struct ifmultiaddr *ifma;
3118 struct ifmultiaddr *next;
3119
3120 IF_ADDR_LOCK(ifp);
3121 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
3122 if_delmulti_locked(ifp, ifma, 0);
3123 IF_ADDR_UNLOCK(ifp);
3124 }
3125
3126 /*
3127 * Delete a multicast group membership by group membership pointer.
3128 * Network-layer protocol domains must use this routine.
3129 *
3130 * It is safe to call this routine if the ifp disappeared.
3131 */
3132 void
3133 if_delmulti_ifma(struct ifmultiaddr *ifma)
3134 {
3135 struct ifnet *ifp;
3136 int lastref;
3137
3138 ifp = ifma->ifma_ifp;
3139 #ifdef DIAGNOSTIC
3140 if (ifp == NULL) {
3141 printf("%s: ifma_ifp seems to be detached\n", __func__);
3142 } else {
3143 struct ifnet *oifp;
3144
3145 IFNET_RLOCK_NOSLEEP();
3146 TAILQ_FOREACH(oifp, &V_ifnet, if_link)
3147 if (ifp == oifp)
3148 break;
3149 if (ifp != oifp) {
3150 printf("%s: ifnet %p disappeared\n", __func__, ifp);
3151 ifp = NULL;
3152 }
3153 IFNET_RUNLOCK_NOSLEEP();
3154 }
3155 #endif
3156 /*
3157 * If and only if the ifnet instance exists: Acquire the address lock.
3158 */
3159 if (ifp != NULL)
3160 IF_ADDR_LOCK(ifp);
3161
3162 lastref = if_delmulti_locked(ifp, ifma, 0);
3163
3164 if (ifp != NULL) {
3165 /*
3166 * If and only if the ifnet instance exists:
3167 * Release the address lock.
3168 * If the group was left: update the hardware hash filter.
3169 */
3170 IF_ADDR_UNLOCK(ifp);
3171 if (lastref && ifp->if_ioctl != NULL) {
3172 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
3173 }
3174 }
3175 }
3176
3177 /*
3178 * Perform deletion of network-layer and/or link-layer multicast address.
3179 *
3180 * Return 0 if the reference count was decremented.
3181 * Return 1 if the final reference was released, indicating that the
3182 * hardware hash filter should be reprogrammed.
3183 */
3184 static int
3185 if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
3186 {
3187 struct ifmultiaddr *ll_ifma;
3188
3189 if (ifp != NULL && ifma->ifma_ifp != NULL) {
3190 KASSERT(ifma->ifma_ifp == ifp,
3191 ("%s: inconsistent ifp %p", __func__, ifp));
3192 IF_ADDR_LOCK_ASSERT(ifp);
3193 }
3194
3195 ifp = ifma->ifma_ifp;
3196
3197 /*
3198 * If the ifnet is detaching, null out references to ifnet,
3199 * so that upper protocol layers will notice, and not attempt
3200 * to obtain locks for an ifnet which no longer exists. The
3201 * routing socket announcement must happen before the ifnet
3202 * instance is detached from the system.
3203 */
3204 if (detaching) {
3205 #ifdef DIAGNOSTIC
3206 printf("%s: detaching ifnet instance %p\n", __func__, ifp);
3207 #endif
3208 /*
3209 * ifp may already be nulled out if we are being reentered
3210 * to delete the ll_ifma.
3211 */
3212 if (ifp != NULL) {
3213 rt_newmaddrmsg(RTM_DELMADDR, ifma);
3214 ifma->ifma_ifp = NULL;
3215 }
3216 }
3217
3218 if (--ifma->ifma_refcount > 0)
3219 return 0;
3220
3221 /*
3222 * If this ifma is a network-layer ifma, a link-layer ifma may
3223 * have been associated with it. Release it first if so.
3224 */
3225 ll_ifma = ifma->ifma_llifma;
3226 if (ll_ifma != NULL) {
3227 KASSERT(ifma->ifma_lladdr != NULL,
3228 ("%s: llifma w/o lladdr", __func__));
3229 if (detaching)
3230 ll_ifma->ifma_ifp = NULL; /* XXX */
3231 if (--ll_ifma->ifma_refcount == 0) {
3232 if (ifp != NULL) {
3233 TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma,
3234 ifma_link);
3235 }
3236 if_freemulti(ll_ifma);
3237 }
3238 }
3239
3240 if (ifp != NULL)
3241 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
3242
3243 if_freemulti(ifma);
3244
3245 /*
3246 * The last reference to this instance of struct ifmultiaddr
3247 * was released; the hardware should be notified of this change.
3248 */
3249 return 1;
3250 }
3251
3252 /*
3253 * Set the link layer address on an interface.
3254 *
3255 * At this time we only support certain types of interfaces,
3256 * and we don't allow the length of the address to change.
3257 */
3258 int
3259 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
3260 {
3261 struct sockaddr_dl *sdl;
3262 struct ifaddr *ifa;
3263 struct ifreq ifr;
3264
3265 IF_ADDR_LOCK(ifp);
3266 ifa = ifp->if_addr;
3267 if (ifa == NULL) {
3268 IF_ADDR_UNLOCK(ifp);
3269 return (EINVAL);
3270 }
3271 ifa_ref(ifa);
3272 IF_ADDR_UNLOCK(ifp);
3273 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
3274 if (sdl == NULL) {
3275 ifa_free(ifa);
3276 return (EINVAL);
3277 }
3278 if (len != sdl->sdl_alen) { /* don't allow length to change */
3279 ifa_free(ifa);
3280 return (EINVAL);
3281 }
3282 switch (ifp->if_type) {
3283 case IFT_ETHER:
3284 case IFT_FDDI:
3285 case IFT_XETHER:
3286 case IFT_ISO88025:
3287 case IFT_L2VLAN:
3288 case IFT_BRIDGE:
3289 case IFT_ARCNET:
3290 case IFT_IEEE8023ADLAG:
3291 case IFT_IEEE80211:
3292 bcopy(lladdr, LLADDR(sdl), len);
3293 ifa_free(ifa);
3294 break;
3295 default:
3296 ifa_free(ifa);
3297 return (ENODEV);
3298 }
3299
3300 /*
3301 * If the interface is already up, we need
3302 * to re-init it in order to reprogram its
3303 * address filter.
3304 */
3305 if ((ifp->if_flags & IFF_UP) != 0) {
3306 if (ifp->if_ioctl) {
3307 ifp->if_flags &= ~IFF_UP;
3308 ifr.ifr_flags = ifp->if_flags & 0xffff;
3309 ifr.ifr_flagshigh = ifp->if_flags >> 16;
3310 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
3311 ifp->if_flags |= IFF_UP;
3312 ifr.ifr_flags = ifp->if_flags & 0xffff;
3313 ifr.ifr_flagshigh = ifp->if_flags >> 16;
3314 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
3315 }
3316 #ifdef INET
3317 /*
3318 * Also send gratuitous ARPs to notify other nodes about
3319 * the address change.
3320 */
3321 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
3322 if (ifa->ifa_addr->sa_family == AF_INET)
3323 arp_ifinit(ifp, ifa);
3324 }
3325 #endif
3326 }
3327 return (0);
3328 }
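
/*
 * [Editorial sketch, not part of if.c] Userland counterpart of the
 * SIOCSIFLLADDR handling above: the kernel reads the new address from
 * ifr_addr.sa_data with length ifr_addr.sa_len, which must match the current
 * sdl_alen (6 for Ethernet).  set_ether_lladdr() is hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int
set_ether_lladdr(int s, const char *name, const unsigned char mac[6])
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	ifr.ifr_addr.sa_family = AF_LINK;
	ifr.ifr_addr.sa_len = 6;
	memcpy(ifr.ifr_addr.sa_data, mac, 6);
	return (ioctl(s, SIOCSIFLLADDR, &ifr));
}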
3329
3330 /*
3331 * The name argument must be a pointer to storage which will last as
3332 * long as the interface does. For physical devices, the result of
3333 * device_get_name(dev) is a good choice and for pseudo-devices a
3334 * static string works well.
3335 */
3336 void
3337 if_initname(struct ifnet *ifp, const char *name, int unit)
3338 {
3339 ifp->if_dname = name;
3340 ifp->if_dunit = unit;
3341 if (unit != IF_DUNIT_NONE)
3342 snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
3343 else
3344 strlcpy(ifp->if_xname, name, IFNAMSIZ);
3345 }
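
/*
 * [Editorial sketch, not part of if.c] The usual newbus attach idiom behind
 * the comment above: the string returned by device_get_name() lives as long
 * as the driver, so it satisfies the lifetime requirement.  The softc and
 * mydrv_attach() are hypothetical; a real driver would continue with media
 * setup and ether_ifattach().
 */
struct mydrv_softc {
	struct ifnet	*sc_ifp;
};

static int
mydrv_attach(device_t dev)
{
	struct mydrv_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	return (0);
}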
3346
3347 int
3348 if_printf(struct ifnet *ifp, const char * fmt, ...)
3349 {
3350 va_list ap;
3351 int retval;
3352
3353 retval = printf("%s: ", ifp->if_xname);
3354 va_start(ap, fmt);
3355 retval += vprintf(fmt, ap);
3356 va_end(ap);
3357 return (retval);
3358 }
3359
3360 void
3361 if_start(struct ifnet *ifp)
3362 {
3363
3364 (*(ifp)->if_start)(ifp);
3365 }
3366
3367 /*
3368 * Backwards compatibility interface for drivers
3369 * that have not implemented an if_transmit method.
3370 */
3371 static int
3372 if_transmit(struct ifnet *ifp, struct mbuf *m)
3373 {
3374 int error;
3375
3376 IFQ_HANDOFF(ifp, m, error);
3377 return (error);
3378 }
3379
3380 int
3381 if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
3382 {
3383 int active = 0;
3384
3385 IF_LOCK(ifq);
3386 if (_IF_QFULL(ifq)) {
3387 _IF_DROP(ifq);
3388 IF_UNLOCK(ifq);
3389 m_freem(m);
3390 return (0);
3391 }
3392 if (ifp != NULL) {
3393 ifp->if_obytes += m->m_pkthdr.len + adjust;
3394 if (m->m_flags & (M_BCAST|M_MCAST))
3395 ifp->if_omcasts++;
3396 active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
3397 }
3398 _IF_ENQUEUE(ifq, m);
3399 IF_UNLOCK(ifq);
3400 if (ifp != NULL && !active)
3401 (*(ifp)->if_start)(ifp);
3402 return (1);
3403 }
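
/*
 * [Editorial sketch, not part of if.c] if_handoff() can also feed a private
 * struct ifqueue: with a NULL ifp it only enqueues (or drops on overflow) and
 * never kicks if_start, as the NULL checks above show.  The queue, its init
 * routine, and example_enqueue() are hypothetical; assumes <sys/mutex.h> in
 * addition to this file's headers.
 */
static struct ifqueue example_inq;

static void
example_inq_init(void)
{

	mtx_init(&example_inq.ifq_mtx, "example_inq", NULL, MTX_DEF);
	example_inq.ifq_maxlen = ifqmaxlen;
}

static void
example_enqueue(struct mbuf *m)
{

	(void)if_handoff(&example_inq, m, NULL, 0);
}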
3404
3405 void
3406 if_register_com_alloc(u_char type,
3407 if_com_alloc_t *a, if_com_free_t *f)
3408 {
3409
3410 KASSERT(if_com_alloc[type] == NULL,
3411 ("if_register_com_alloc: %d already registered", type));
3412 KASSERT(if_com_free[type] == NULL,
3413 ("if_register_com_alloc: %d free already registered", type));
3414
3415 if_com_alloc[type] = a;
3416 if_com_free[type] = f;
3417 }
3418
3419 void
3420 if_deregister_com_alloc(u_char type)
3421 {
3422
3423 KASSERT(if_com_alloc[type] != NULL,
3424 ("if_deregister_com_alloc: %d not registered", type));
3425 KASSERT(if_com_free[type] != NULL,
3426 ("if_deregister_com_alloc: %d free not registered", type));
3427 if_com_alloc[type] = NULL;
3428 if_com_free[type] = NULL;
3429 }