FreeBSD/Linux Kernel Cross Reference
sys/netinet6/mld6.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2009 Bruce Simpson.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
31 */
32
33 /*-
34 * Copyright (c) 1988 Stephen Deering.
35 * Copyright (c) 1992, 1993
36 * The Regents of the University of California. All rights reserved.
37 *
38 * This code is derived from software contributed to Berkeley by
39 * Stephen Deering of Stanford University.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
66 */
67
68 #include <sys/cdefs.h>
69 __FBSDID("$FreeBSD$");
70
71 #include "opt_inet.h"
72 #include "opt_inet6.h"
73
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/protosw.h>
79 #include <sys/sysctl.h>
80 #include <sys/kernel.h>
81 #include <sys/callout.h>
82 #include <sys/malloc.h>
83 #include <sys/module.h>
84 #include <sys/ktr.h>
85
86 #include <net/if.h>
87 #include <net/if_var.h>
88 #include <net/route.h>
89 #include <net/vnet.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_var.h>
93 #include <netinet6/in6_var.h>
94 #include <netinet/ip6.h>
95 #include <netinet6/ip6_var.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet/icmp6.h>
98 #include <netinet6/mld6.h>
99 #include <netinet6/mld6_var.h>
100
101 #include <security/mac/mac_framework.h>
102
103 #ifndef KTR_MLD
104 #define KTR_MLD KTR_INET6
105 #endif
106
107 static struct mld_ifsoftc *
108 mli_alloc_locked(struct ifnet *);
109 static void mli_delete_locked(const struct ifnet *);
110 static void mld_dispatch_packet(struct mbuf *);
111 static void mld_dispatch_queue(struct mbufq *, int);
112 static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
113 static void mld_fasttimo_vnet(struct in6_multi_head *inmh);
114 static int mld_handle_state_change(struct in6_multi *,
115 struct mld_ifsoftc *);
116 static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
117 const int);
118 #ifdef KTR
119 static char * mld_rec_type_to_str(const int);
120 #endif
121 static void mld_set_version(struct mld_ifsoftc *, const int);
122 static void mld_slowtimo_vnet(void);
123 static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
124 /*const*/ struct mld_hdr *);
125 static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
126 /*const*/ struct mld_hdr *);
127 static void mld_v1_process_group_timer(struct in6_multi_head *,
128 struct in6_multi *);
129 static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
130 static int mld_v1_transmit_report(struct in6_multi *, const int);
131 static void mld_v1_update_group(struct in6_multi *, const int);
132 static void mld_v2_cancel_link_timers(struct mld_ifsoftc *);
133 static void mld_v2_dispatch_general_query(struct mld_ifsoftc *);
134 static struct mbuf *
135 mld_v2_encap_report(struct ifnet *, struct mbuf *);
136 static int mld_v2_enqueue_filter_change(struct mbufq *,
137 struct in6_multi *);
138 static int mld_v2_enqueue_group_record(struct mbufq *,
139 struct in6_multi *, const int, const int, const int,
140 const int);
141 static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
142 struct mbuf *, struct mldv2_query *, const int, const int);
143 static int mld_v2_merge_state_changes(struct in6_multi *,
144 struct mbufq *);
145 static void mld_v2_process_group_timers(struct in6_multi_head *,
146 struct mbufq *, struct mbufq *,
147 struct in6_multi *, const int);
148 static int mld_v2_process_group_query(struct in6_multi *,
149 struct mld_ifsoftc *mli, int, struct mbuf *,
150 struct mldv2_query *, const int);
151 static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
152 static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
153
154 /*
155 * Normative references: RFC 2710, RFC 3590, RFC 3810.
156 *
157 * Locking:
158 * * The MLD subsystem lock ends up being system-wide for the moment,
159 * but could be per-VIMAGE later on.
160 * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
161 * Any may be taken independently; if any are held at the same
162 * time, the above lock order must be followed.
163 * * IN6_MULTI_LOCK covers in_multi.
164 * * MLD_LOCK covers per-link state and any global variables in this file.
165 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
166 * per-link state iterators.
167 *
168 * XXX LOR PREVENTION
169 * A special case for IPv6 is the in6_setscope() routine. ip6_output()
170 * will not accept an ifp; it wants an embedded scope ID, unlike
171 * ip_output(), which happily takes the ifp given to it. The embedded
172 * scope ID is only used by MLD to select the outgoing interface.
173 *
174 * During interface attach and detach, MLD will take MLD_LOCK *after*
175 * the IF_AFDATA_LOCK.
176 * As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
177 * it with MLD_LOCK held without triggering an LOR. A netisr with indirect
178 * dispatch could work around this, but we'd rather not do that, as it
179 * can introduce other races.
180 *
181 * As such, we exploit the fact that the scope ID is just the interface
182 * index, and embed it in the IPv6 destination address accordingly.
183 * This is potentially NOT VALID for MLDv1 reports, as they
184 * are always sent to the multicast group itself; as MLDv2
185 * reports are always sent to ff02::16, this is not an issue
186 * when MLDv2 is in use.
187 *
188 * This does not however eliminate the LOR when ip6_output() itself
189 * calls in6_setscope() internally whilst MLD_LOCK is held. This will
190 * trigger a LOR warning in WITNESS when the ifnet is detached.
191 *
192 * The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
193 * how it's used across the network stack. Here we're simply exploiting
194 * the fact that MLD runs at a similar layer in the stack to scope6.c.
195 *
196 * VIMAGE:
197 * * Each in6_multi corresponds to an ifp, and each ifp corresponds
198 * to a vnet in ifp->if_vnet.
199 */
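/*
 * Illustrative sketch of the lock order documented above; a path that
 * needs all three locks must take them as follows:
 *
 *	IN6_MULTI_LOCK();
 *	MLD_LOCK();
 *	IF_ADDR_WLOCK(ifp);
 *	... update per-link and per-group state ...
 *	IF_ADDR_WUNLOCK(ifp);
 *	MLD_UNLOCK();
 *	IN6_MULTI_UNLOCK();
 */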
200 static struct mtx mld_mtx;
201 static MALLOC_DEFINE(M_MLD, "mld", "mld state");
202
203 #define MLD_EMBEDSCOPE(pin6, zoneid) \
204 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
205 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
206 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)
207
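/*
 * Example of the KAME convention used by the macro above: embedding
 * zone ID 2 in ff02::16 yields the internal form ff02:2::16;
 * in6_clearscope() reverses the embedding before an address goes on
 * the wire or is exported to userland.
 */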
208 /*
209 * VIMAGE-wide globals.
210 */
211 VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay) = {10, 0};
212 VNET_DEFINE_STATIC(LIST_HEAD(, mld_ifsoftc), mli_head);
213 VNET_DEFINE_STATIC(int, interface_timers_running6);
214 VNET_DEFINE_STATIC(int, state_change_timers_running6);
215 VNET_DEFINE_STATIC(int, current_state_timers_running6);
216
217 #define V_mld_gsrdelay VNET(mld_gsrdelay)
218 #define V_mli_head VNET(mli_head)
219 #define V_interface_timers_running6 VNET(interface_timers_running6)
220 #define V_state_change_timers_running6 VNET(state_change_timers_running6)
221 #define V_current_state_timers_running6 VNET(current_state_timers_running6)
222
223 SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
224
225 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
226 "IPv6 Multicast Listener Discovery");
227
228 /*
229 * Virtualized sysctls.
230 */
231 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
232 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
233 &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
234 "Rate limit for MLDv2 Group-and-Source queries in seconds");
235
236 /*
237 * Non-virtualized sysctls.
238 */
239 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
240 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
241 "Per-interface MLDv2 state");
242
243 static int mld_v1enable = 1;
244 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN,
245 &mld_v1enable, 0, "Enable fallback to MLDv1");
246
247 static int mld_v2enable = 1;
248 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v2enable, CTLFLAG_RWTUN,
249 &mld_v2enable, 0, "Enable MLDv2");
250
251 static int mld_use_allow = 1;
252 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
253 &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
254
255 /*
256 * Packed Router Alert option structure declaration.
257 */
258 struct mld_raopt {
259 struct ip6_hbh hbh;
260 struct ip6_opt pad;
261 struct ip6_opt_router ra;
262 } __packed;
263
264 /*
265 * Router Alert hop-by-hop option header.
266 */
267 static struct mld_raopt mld_ra = {
268 .hbh = { 0, 0 },
269 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
270 .ra = {
271 .ip6or_type = IP6OPT_ROUTER_ALERT,
272 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
273 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
274 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
275 }
276 };
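/*
 * The initializer above yields the fixed 8-byte hop-by-hop block
 * prepended to outgoing MLD traffic: a 2-byte PadN option followed by
 * a 4-byte Router Alert option carrying value 0 (MLD), as required by
 * RFC 2710 and RFC 3810.
 */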
277 static struct ip6_pktopts mld_po;
278
279 static __inline void
280 mld_save_context(struct mbuf *m, struct ifnet *ifp)
281 {
282
283 #ifdef VIMAGE
284 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
285 #endif /* VIMAGE */
286 m->m_pkthdr.rcvif = ifp;
287 m->m_pkthdr.flowid = ifp->if_index;
288 }
289
290 static __inline void
291 mld_scrub_context(struct mbuf *m)
292 {
293
294 m->m_pkthdr.PH_loc.ptr = NULL;
295 m->m_pkthdr.flowid = 0;
296 }
297
298 /*
299 * Restore context from a queued output chain.
300 * Return saved ifindex.
301 *
302 * VIMAGE: The assertion is there to make sure that we
303 * actually called CURVNET_SET() with what's in the mbuf chain.
304 */
305 static __inline uint32_t
306 mld_restore_context(struct mbuf *m)
307 {
308
309 #if defined(VIMAGE) && defined(INVARIANTS)
310 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
311 ("%s: called when curvnet was not restored: cuvnet %p m ptr %p",
312 __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
313 #endif
314 return (m->m_pkthdr.flowid);
315 }
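/*
 * Usage sketch (illustrative): mld_save_context() is called when a
 * report is queued for deferred transmission; mld_dispatch_packet()
 * later recovers the saved ifindex (and, under VIMAGE, the vnet) via
 * mld_restore_context():
 *
 *	mld_save_context(m, ifp);		(at enqueue time)
 *	...
 *	ifindex = mld_restore_context(m);	(at dispatch; curvnet set)
 */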
316
317 /*
318 * Retrieve or set threshold between group-source queries in seconds.
319 *
320 * VIMAGE: Assume curvnet set by caller.
321 * SMPng: NOTE: Serialized by MLD lock.
322 */
323 static int
324 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
325 {
326 int error;
327 int i;
328
329 error = sysctl_wire_old_buffer(req, sizeof(int));
330 if (error)
331 return (error);
332
333 MLD_LOCK();
334
335 i = V_mld_gsrdelay.tv_sec;
336
337 error = sysctl_handle_int(oidp, &i, 0, req);
338 if (error || !req->newptr)
339 goto out_locked;
340
341 if (i < -1 || i >= 60) {
342 error = EINVAL;
343 goto out_locked;
344 }
345
346 CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
347 V_mld_gsrdelay.tv_sec, i);
348 V_mld_gsrdelay.tv_sec = i;
349
350 out_locked:
351 MLD_UNLOCK();
352 return (error);
353 }
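/*
 * This handler backs the net.inet6.mld.gsrdelay sysctl; for example,
 * from userland:
 *
 *	# sysctl net.inet6.mld.gsrdelay=10
 */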
354
355 /*
356 * Expose struct mld_ifsoftc to userland, keyed by ifindex.
357 * For use by ifmcstat(8).
358 *
359 * SMPng: NOTE: Does an unlocked ifindex space read.
360 * VIMAGE: Assume curvnet set by caller. The node handler itself
361 * is not directly virtualized.
362 */
363 static int
364 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
365 {
366 int *name;
367 int error;
368 u_int namelen;
369 struct ifnet *ifp;
370 struct mld_ifsoftc *mli;
371
372 name = (int *)arg1;
373 namelen = arg2;
374
375 if (req->newptr != NULL)
376 return (EPERM);
377
378 if (namelen != 1)
379 return (EINVAL);
380
381 error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
382 if (error)
383 return (error);
384
385 IN6_MULTI_LOCK();
386 IN6_MULTI_LIST_LOCK();
387 MLD_LOCK();
388
389 if (name[0] <= 0 || name[0] > V_if_index) {
390 error = ENOENT;
391 goto out_locked;
392 }
393
394 error = ENOENT;
395
396 ifp = ifnet_byindex(name[0]);
397 if (ifp == NULL)
398 goto out_locked;
399
400 LIST_FOREACH(mli, &V_mli_head, mli_link) {
401 if (ifp == mli->mli_ifp) {
402 struct mld_ifinfo info;
403
404 info.mli_version = mli->mli_version;
405 info.mli_v1_timer = mli->mli_v1_timer;
406 info.mli_v2_timer = mli->mli_v2_timer;
407 info.mli_flags = mli->mli_flags;
408 info.mli_rv = mli->mli_rv;
409 info.mli_qi = mli->mli_qi;
410 info.mli_qri = mli->mli_qri;
411 info.mli_uri = mli->mli_uri;
412 error = SYSCTL_OUT(req, &info, sizeof(info));
413 break;
414 }
415 }
416
417 out_locked:
418 MLD_UNLOCK();
419 IN6_MULTI_LIST_UNLOCK();
420 IN6_MULTI_UNLOCK();
421 return (error);
422 }
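/*
 * Example consumer (illustrative): ifmcstat(8) walks this node to
 * display per-interface MLDv2 state, e.g. `ifmcstat -i em0 -f inet6'.
 */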
423
424 /*
425 * Dispatch an entire queue of pending packet chains.
426 * VIMAGE: Assumes the vnet pointer has been set.
427 */
428 static void
429 mld_dispatch_queue(struct mbufq *mq, int limit)
430 {
431 struct mbuf *m;
432
433 while ((m = mbufq_dequeue(mq)) != NULL) {
434 CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, mq);
435 mld_dispatch_packet(m);
436 if (--limit == 0)
437 break;
438 }
439 }
440
441 /*
442 * Filter outgoing MLD report state by group.
443 *
444 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
445 * and node-local addresses. However, kernel and socket consumers
446 * always embed the KAME scope ID in the address provided, so strip it
447 * when performing comparison.
448 * Note: This is not the same as the *multicast* scope.
449 *
450 * Return zero if the given group is one for which MLD reports
451 * should be suppressed, or non-zero if reports should be issued.
452 */
453 static __inline int
454 mld_is_addr_reported(const struct in6_addr *addr)
455 {
456
457 KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
458
459 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
460 return (0);
461
462 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
463 struct in6_addr tmp = *addr;
464 in6_clearscope(&tmp);
465 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
466 return (0);
467 }
468
469 return (1);
470 }
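/*
 * Examples: ff01::1 (node-local all-nodes) and ff02::1 (link-local
 * all-nodes) are suppressed; a solicited-node group such as
 * ff02::1:ff00:1 is reported.
 */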
471
472 /*
473 * Attach MLD when PF_INET6 is attached to an interface.
474 *
475 * SMPng: Normally called with IF_AFDATA_LOCK held.
476 */
477 struct mld_ifsoftc *
478 mld_domifattach(struct ifnet *ifp)
479 {
480 struct mld_ifsoftc *mli;
481
482 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
483 __func__, ifp, if_name(ifp));
484
485 MLD_LOCK();
486
487 mli = mli_alloc_locked(ifp);
488 if (!(ifp->if_flags & IFF_MULTICAST))
489 mli->mli_flags |= MLIF_SILENT;
490 if (mld_use_allow)
491 mli->mli_flags |= MLIF_USEALLOW;
492
493 MLD_UNLOCK();
494
495 return (mli);
496 }
497
498 /*
499 * VIMAGE: assume curvnet set by caller.
500 */
501 static struct mld_ifsoftc *
502 mli_alloc_locked(/*const*/ struct ifnet *ifp)
503 {
504 struct mld_ifsoftc *mli;
505
506 MLD_LOCK_ASSERT();
507
508 mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
509 if (mli == NULL)
510 goto out;
511
512 mli->mli_ifp = ifp;
513 mli->mli_version = MLD_VERSION_2;
514 mli->mli_flags = 0;
515 mli->mli_rv = MLD_RV_INIT;
516 mli->mli_qi = MLD_QI_INIT;
517 mli->mli_qri = MLD_QRI_INIT;
518 mli->mli_uri = MLD_URI_INIT;
519 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
520
521 LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
522
523 CTR2(KTR_MLD, "allocate mld_ifsoftc for ifp %p(%s)",
524 ifp, if_name(ifp));
525
526 out:
527 return (mli);
528 }
529
530 /*
531 * Hook for ifdetach.
532 *
533 * NOTE: Some finalization tasks need to run before the protocol domain
534 * is detached, but also before the link layer does its cleanup.
535 * Here we run before link-layer cleanup: clean up groups, but do not free MLD state.
536 *
537 * SMPng: Caller must hold IN6_MULTI_LIST_LOCK().
538 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
539 * XXX This routine is also bitten by unlocked ifma_protospec access.
540 */
541 void
542 mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh)
543 {
544 struct epoch_tracker et;
545 struct mld_ifsoftc *mli;
546 struct ifmultiaddr *ifma;
547 struct in6_multi *inm;
548
549 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
550 if_name(ifp));
551
552 IN6_MULTI_LIST_LOCK_ASSERT();
553 MLD_LOCK();
554
555 mli = MLD_IFINFO(ifp);
556 IF_ADDR_WLOCK(ifp);
557 /*
558 * Extract list of in6_multi associated with the detaching ifp
559 * which the PF_INET6 layer is about to release.
560 */
561 NET_EPOCH_ENTER(et);
562 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
563 inm = in6m_ifmultiaddr_get_inm(ifma);
564 if (inm == NULL)
565 continue;
566 in6m_disconnect_locked(inmh, inm);
567
568 if (mli->mli_version == MLD_VERSION_2) {
569 in6m_clear_recorded(inm);
570
571 /*
572 * We need to release the final reference held
573 * for issuing the INCLUDE {}.
574 */
575 if (inm->in6m_state == MLD_LEAVING_MEMBER) {
576 inm->in6m_state = MLD_NOT_MEMBER;
577 in6m_rele_locked(inmh, inm);
578 }
579 }
580 }
581 NET_EPOCH_EXIT(et);
582 IF_ADDR_WUNLOCK(ifp);
583 MLD_UNLOCK();
584 }
585
586 /*
587 * Hook for domifdetach.
588 * Runs after link-layer cleanup; free MLD state.
589 *
590 * SMPng: Normally called with IF_AFDATA_LOCK held.
591 */
592 void
593 mld_domifdetach(struct ifnet *ifp)
594 {
595
596 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
597 __func__, ifp, if_name(ifp));
598
599 MLD_LOCK();
600 mli_delete_locked(ifp);
601 MLD_UNLOCK();
602 }
603
604 static void
605 mli_delete_locked(const struct ifnet *ifp)
606 {
607 struct mld_ifsoftc *mli, *tmli;
608
609 CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
610 __func__, ifp, if_name(ifp));
611
612 MLD_LOCK_ASSERT();
613
614 LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
615 if (mli->mli_ifp == ifp) {
616 /*
617 * Free deferred General Query responses.
618 */
619 mbufq_drain(&mli->mli_gq);
620
621 LIST_REMOVE(mli, mli_link);
622
623 free(mli, M_MLD);
624 return;
625 }
626 }
627 }
628
629 /*
630 * Process a received MLDv1 general or address-specific query.
631 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
632 *
633 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
634 * mld_addr. This is OK as we own the mbuf chain.
635 */
636 static int
637 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
638 /*const*/ struct mld_hdr *mld)
639 {
640 struct ifmultiaddr *ifma;
641 struct mld_ifsoftc *mli;
642 struct in6_multi *inm;
643 int is_general_query;
644 uint16_t timer;
645 #ifdef KTR
646 char ip6tbuf[INET6_ADDRSTRLEN];
647 #endif
648
649 NET_EPOCH_ASSERT();
650
651 is_general_query = 0;
652
653 if (!mld_v1enable) {
654 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
655 ip6_sprintf(ip6tbuf, &mld->mld_addr),
656 ifp, if_name(ifp));
657 return (0);
658 }
659
660 /*
661 * RFC3810 Section 6.2: MLD queries must originate from
662 * a router's link-local address.
663 */
664 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
665 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
666 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
667 ifp, if_name(ifp));
668 return (0);
669 }
670
671 /*
672 * Do address field validation upfront before we accept
673 * the query.
674 */
675 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
676 /*
677 * MLDv1 General Query.
678 * If this was not sent to the all-nodes group, ignore it.
679 */
680 struct in6_addr dst;
681
682 dst = ip6->ip6_dst;
683 in6_clearscope(&dst);
684 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
685 return (EINVAL);
686 is_general_query = 1;
687 } else {
688 /*
689 * Embed scope ID of receiving interface in MLD query for
690 * lookup whilst we don't hold other locks.
691 */
692 in6_setscope(&mld->mld_addr, ifp, NULL);
693 }
694
695 IN6_MULTI_LIST_LOCK();
696 MLD_LOCK();
697
698 /*
699 * Switch to MLDv1 host compatibility mode.
700 */
701 mli = MLD_IFINFO(ifp);
702 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
703 mld_set_version(mli, MLD_VERSION_1);
704
705 timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
706 if (timer == 0)
707 timer = 1;
708
709 if (is_general_query) {
710 /*
711 * For each reporting group joined on this
712 * interface, kick the report timer.
713 */
714 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
715 ifp, if_name(ifp));
716 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
717 inm = in6m_ifmultiaddr_get_inm(ifma);
718 if (inm == NULL)
719 continue;
720 mld_v1_update_group(inm, timer);
721 }
722 } else {
723 /*
724 * MLDv1 Group-Specific Query.
725 * If this is a group-specific MLDv1 query, we need only
726 * look up the single group to process it.
727 */
728 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
729 if (inm != NULL) {
730 CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
731 ip6_sprintf(ip6tbuf, &mld->mld_addr),
732 ifp, if_name(ifp));
733 mld_v1_update_group(inm, timer);
734 }
735 /* XXX Clear embedded scope ID as userland won't expect it. */
736 in6_clearscope(&mld->mld_addr);
737 }
738
739 MLD_UNLOCK();
740 IN6_MULTI_LIST_UNLOCK();
741
742 return (0);
743 }
744
745 /*
746 * Update the report timer on a group in response to an MLDv1 query.
747 *
748 * If we are becoming the reporting member for this group, start the timer.
749 * If we already are the reporting member for this group, and timer is
750 * below the threshold, reset it.
751 *
752 * We may be updating the group for the first time since we switched
753 * to MLDv2. If we are, then we must clear any recorded source lists,
754 * and transition to REPORTING state; the group timer is overloaded
755 * for group and group-source query responses.
756 *
757 * Unlike MLDv2, the delay per group should be jittered
758 * to avoid bursts of MLDv1 reports.
759 */
760 static void
761 mld_v1_update_group(struct in6_multi *inm, const int timer)
762 {
763 #ifdef KTR
764 char ip6tbuf[INET6_ADDRSTRLEN];
765 #endif
766
767 CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
768 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
769 if_name(inm->in6m_ifp), timer);
770
771 IN6_MULTI_LIST_LOCK_ASSERT();
772
773 switch (inm->in6m_state) {
774 case MLD_NOT_MEMBER:
775 case MLD_SILENT_MEMBER:
776 break;
777 case MLD_REPORTING_MEMBER:
778 if (inm->in6m_timer != 0 &&
779 inm->in6m_timer <= timer) {
780 CTR1(KTR_MLD, "%s: REPORTING and timer running, "
781 "skipping.", __func__);
782 break;
783 }
784 /* FALLTHROUGH */
785 case MLD_SG_QUERY_PENDING_MEMBER:
786 case MLD_G_QUERY_PENDING_MEMBER:
787 case MLD_IDLE_MEMBER:
788 case MLD_LAZY_MEMBER:
789 case MLD_AWAKENING_MEMBER:
790 CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
791 inm->in6m_state = MLD_REPORTING_MEMBER;
792 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
793 V_current_state_timers_running6 = 1;
794 break;
795 case MLD_SLEEPING_MEMBER:
796 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
797 inm->in6m_state = MLD_AWAKENING_MEMBER;
798 break;
799 case MLD_LEAVING_MEMBER:
800 break;
801 }
802 }
803
804 /*
805 * Process a received MLDv2 general, group-specific or
806 * group-and-source-specific query.
807 *
808 * Assumes that mld points to a struct mldv2_query which is stored in
809 * contiguous memory.
810 *
811 * Return 0 if successful, otherwise an appropriate error code is returned.
812 */
813 static int
814 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
815 struct mbuf *m, struct mldv2_query *mld, const int off, const int icmp6len)
816 {
817 struct mld_ifsoftc *mli;
818 struct in6_multi *inm;
819 uint32_t maxdelay, nsrc, qqi;
820 int is_general_query;
821 uint16_t timer;
822 uint8_t qrv;
823 #ifdef KTR
824 char ip6tbuf[INET6_ADDRSTRLEN];
825 #endif
826
827 NET_EPOCH_ASSERT();
828
829 if (!mld_v2enable) {
830 CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
831 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
832 ifp, if_name(ifp));
833 return (0);
834 }
835
836 /*
837 * RFC3810 Section 6.2: MLD queries must originate from
838 * a router's link-local address.
839 */
840 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
841 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
842 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
843 ifp, if_name(ifp));
844 return (0);
845 }
846
847 is_general_query = 0;
848
849 CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
850
851 maxdelay = ntohs(mld->mld_maxdelay); /* in milliseconds */
852 if (maxdelay >= 32768) {
853 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
854 (MLD_MRC_EXP(maxdelay) + 3);
855 }
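/*
 * Worked example: an on-wire MRC of 0x8000 has exponent 0 and
 * mantissa 0, and decodes to (0x000 | 0x1000) << (0 + 3), i.e.
 * 32768 milliseconds.
 */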
856 timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
857 if (timer == 0)
858 timer = 1;
859
860 qrv = MLD_QRV(mld->mld_misc);
861 if (qrv < 2) {
862 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
863 qrv, MLD_RV_INIT);
864 qrv = MLD_RV_INIT;
865 }
866
867 qqi = mld->mld_qqi;
868 if (qqi >= 128) {
869 qqi = (MLD_QQIC_MANT(mld->mld_qqi) | 0x10) <<
870 (MLD_QQIC_EXP(mld->mld_qqi) + 3);
871 }
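/*
 * Worked example: a QQIC of 0x80 has exponent 0 and mantissa 0, and
 * decodes to (0x0 | 0x10) << (0 + 3) = 128 seconds, per the RFC 3810
 * floating-point encoding.
 */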
872
873 nsrc = ntohs(mld->mld_numsrc);
874 if (nsrc > MLD_MAX_GS_SOURCES)
875 return (EMSGSIZE);
876 if (icmp6len < sizeof(struct mldv2_query) +
877 (nsrc * sizeof(struct in6_addr)))
878 return (EMSGSIZE);
879
880 /*
881 * Do further input validation upfront to avoid resetting timers
882 * should we need to discard this query.
883 */
884 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
885 /*
886 * A general query with a source list has undefined
887 * behaviour; discard it.
888 */
889 if (nsrc > 0)
890 return (EINVAL);
891 is_general_query = 1;
892 } else {
893 /*
894 * Embed scope ID of receiving interface in MLD query for
895 * lookup whilst we don't hold other locks (due to KAME
896 * locking lameness). We own this mbuf chain just now.
897 */
898 in6_setscope(&mld->mld_addr, ifp, NULL);
899 }
900
901 IN6_MULTI_LIST_LOCK();
902 MLD_LOCK();
903
904 mli = MLD_IFINFO(ifp);
905 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
906
907 /*
908 * Discard the v2 query if we're in Compatibility Mode.
909 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
910 * until the Old Version Querier Present timer expires.
911 */
912 if (mli->mli_version != MLD_VERSION_2)
913 goto out_locked;
914
915 mld_set_version(mli, MLD_VERSION_2);
916 mli->mli_rv = qrv;
917 mli->mli_qi = qqi;
918 mli->mli_qri = maxdelay;
919
920 CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
921 maxdelay);
922
923 if (is_general_query) {
924 /*
925 * MLDv2 General Query.
926 *
927 * Schedule a current-state report on this ifp for
928 * all groups, possibly containing source lists.
929 *
930 * If there is a pending General Query response
931 * scheduled earlier than the selected delay, do
932 * not schedule any other reports.
933 * Otherwise, reset the interface timer.
934 */
935 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
936 ifp, if_name(ifp));
937 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
938 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
939 V_interface_timers_running6 = 1;
940 }
941 } else {
942 /*
943 * MLDv2 Group-specific or Group-and-source-specific Query.
944 *
945 * Group-source-specific queries are throttled on
946 * a per-group basis to defeat denial-of-service attempts.
947 * Queries for groups we are not a member of on this
948 * link are simply ignored.
949 */
950 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
951 if (inm == NULL)
952 goto out_locked;
953 if (nsrc > 0) {
954 if (!ratecheck(&inm->in6m_lastgsrtv,
955 &V_mld_gsrdelay)) {
956 CTR1(KTR_MLD, "%s: GS query throttled.",
957 __func__);
958 goto out_locked;
959 }
960 }
961 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
962 ifp, if_name(ifp));
963 /*
964 * If there is a pending General Query response
965 * scheduled sooner than the selected delay, no
966 * further report need be scheduled.
967 * Otherwise, prepare to respond to the
968 * group-specific or group-and-source query.
969 */
970 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
971 mld_v2_process_group_query(inm, mli, timer, m, mld, off);
972
973 /* XXX Clear embedded scope ID as userland won't expect it. */
974 in6_clearscope(&mld->mld_addr);
975 }
976
977 out_locked:
978 MLD_UNLOCK();
979 IN6_MULTI_LIST_UNLOCK();
980
981 return (0);
982 }
983
984 /*
985 * Process a received MLDv2 group-specific or group-and-source-specific
986 * query.
987 * Return <0 if any error occurred. Currently this is ignored.
988 */
989 static int
990 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
991 int timer, struct mbuf *m0, struct mldv2_query *mld, const int off)
992 {
993 int retval;
994 uint16_t nsrc;
995
996 IN6_MULTI_LIST_LOCK_ASSERT();
997 MLD_LOCK_ASSERT();
998
999 retval = 0;
1000
1001 switch (inm->in6m_state) {
1002 case MLD_NOT_MEMBER:
1003 case MLD_SILENT_MEMBER:
1004 case MLD_SLEEPING_MEMBER:
1005 case MLD_LAZY_MEMBER:
1006 case MLD_AWAKENING_MEMBER:
1007 case MLD_IDLE_MEMBER:
1008 case MLD_LEAVING_MEMBER:
1009 return (retval);
1011 case MLD_REPORTING_MEMBER:
1012 case MLD_G_QUERY_PENDING_MEMBER:
1013 case MLD_SG_QUERY_PENDING_MEMBER:
1014 break;
1015 }
1016
1017 nsrc = ntohs(mld->mld_numsrc);
1018
1019 /* Length should be checked by calling function. */
1020 KASSERT((m0->m_flags & M_PKTHDR) == 0 ||
1021 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) +
1022 nsrc * sizeof(struct in6_addr),
1023 ("mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)",
1024 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) +
1025 nsrc * sizeof(struct in6_addr), m0));
1026
1027 /*
1028 * Deal with group-specific queries upfront.
1029 * If any group query is already pending, purge any recorded
1030 * source-list state if it exists, and schedule a query response
1031 * for this group-specific query.
1032 */
1033 if (nsrc == 0) {
1034 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1035 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1036 in6m_clear_recorded(inm);
1037 timer = min(inm->in6m_timer, timer);
1038 }
1039 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1040 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1041 V_current_state_timers_running6 = 1;
1042 return (retval);
1043 }
1044
1045 /*
1046 * Deal with the case where a group-and-source-specific query has
1047 * been received but a group-specific query is already pending.
1048 */
1049 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1050 timer = min(inm->in6m_timer, timer);
1051 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1052 V_current_state_timers_running6 = 1;
1053 return (retval);
1054 }
1055
1056 /*
1057 * Finally, deal with the case where a group-and-source-specific
1058 * query has been received, where a response to a previous g-s-r
1059 * query exists, or none exists.
1060 * In this case, we need to parse the source-list which the Querier
1061 * has provided us with and check if we have any source list filter
1062 * entries at T1 for these sources. If we do not, there is no need
1063 * to schedule a report, and the query may be dropped.
1064 * If we do, we must record them and schedule a current-state
1065 * report for those sources.
1066 */
1067 if (inm->in6m_nsrc > 0) {
1068 struct in6_addr srcaddr;
1069 int i, nrecorded;
1070 int soff;
1071
1072 soff = off + sizeof(struct mldv2_query);
1073 nrecorded = 0;
1074 for (i = 0; i < nsrc; i++) {
1075 m_copydata(m0, soff, sizeof(struct in6_addr),
1076 (caddr_t)&srcaddr);
1077 retval = in6m_record_source(inm, &srcaddr);
1078 if (retval < 0)
1079 break;
1080 nrecorded += retval;
1081 soff += sizeof(struct in6_addr);
1082 }
1083 if (nrecorded > 0) {
1084 CTR1(KTR_MLD,
1085 "%s: schedule response to SG query", __func__);
1086 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1087 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1088 V_current_state_timers_running6 = 1;
1089 }
1090 }
1091
1092 return (retval);
1093 }
1094
1095 /*
1096 * Process a received MLDv1 host membership report.
1097 * Assumes mld points to mld_hdr in pulled up mbuf chain.
1098 *
1099 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1100 * mld_addr. This is OK as we own the mbuf chain.
1101 */
1102 static int
1103 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1104 /*const*/ struct mld_hdr *mld)
1105 {
1106 struct in6_addr src, dst;
1107 struct in6_ifaddr *ia;
1108 struct in6_multi *inm;
1109 #ifdef KTR
1110 char ip6tbuf[INET6_ADDRSTRLEN];
1111 #endif
1112
1113 NET_EPOCH_ASSERT();
1114
1115 if (!mld_v1enable) {
1116 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
1117 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1118 ifp, if_name(ifp));
1119 return (0);
1120 }
1121
1122 if (ifp->if_flags & IFF_LOOPBACK)
1123 return (0);
1124
1125 /*
1126 * MLDv1 reports must originate from a host's link-local address,
1127 * or the unspecified address (when booting).
1128 */
1129 src = ip6->ip6_src;
1130 in6_clearscope(&src);
1131 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1132 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
1133 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
1134 ifp, if_name(ifp));
1135 return (EINVAL);
1136 }
1137
1138 /*
1139 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1140 * group, and must be directed to the group itself.
1141 */
1142 dst = ip6->ip6_dst;
1143 in6_clearscope(&dst);
1144 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1145 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1146 CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)",
1147 ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
1148 ifp, if_name(ifp));
1149 return (EINVAL);
1150 }
1151
1152 /*
1153 * Make sure we don't hear our own membership report, as fast
1154 * leave requires knowing that we are the only member of a
1155 * group. Assume we used the link-local address if available,
1156 * otherwise look for ::.
1157 *
1158 * XXX Note that scope ID comparison is needed for the address
1159 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1160 * performed for the on-wire address.
1161 */
1162 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1163 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
1164 (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
1165 if (ia != NULL)
1166 ifa_free(&ia->ia_ifa);
1167 return (0);
1168 }
1169 if (ia != NULL)
1170 ifa_free(&ia->ia_ifa);
1171
1172 CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
1173 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1174
1175 /*
1176 * Embed scope ID of receiving interface in MLD query for lookup
1177 * whilst we don't hold other locks (due to KAME locking lameness).
1178 */
1179 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1180 in6_setscope(&mld->mld_addr, ifp, NULL);
1181
1182 IN6_MULTI_LIST_LOCK();
1183 MLD_LOCK();
1184
1185 /*
1186 * MLDv1 report suppression.
1187 * If we are a member of this group, and our membership should be
1188 * reported, and our group timer is pending or about to be reset,
1189 * stop our group timer by transitioning to the 'lazy' state.
1190 */
1191 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
1192 if (inm != NULL) {
1193 struct mld_ifsoftc *mli;
1194
1195 mli = inm->in6m_mli;
1196 KASSERT(mli != NULL,
1197 ("%s: no mli for ifp %p", __func__, ifp));
1198
1199 /*
1200 * If we are in MLDv2 host mode, do not allow the
1201 * other host's MLDv1 report to suppress our reports.
1202 */
1203 if (mli->mli_version == MLD_VERSION_2)
1204 goto out_locked;
1205
1206 inm->in6m_timer = 0;
1207
1208 switch (inm->in6m_state) {
1209 case MLD_NOT_MEMBER:
1210 case MLD_SILENT_MEMBER:
1211 case MLD_SLEEPING_MEMBER:
1212 break;
1213 case MLD_REPORTING_MEMBER:
1214 case MLD_IDLE_MEMBER:
1215 case MLD_AWAKENING_MEMBER:
1216 CTR3(KTR_MLD,
1217 "report suppressed for %s on ifp %p(%s)",
1218 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1219 ifp, if_name(ifp));
/* FALLTHROUGH */
1220 case MLD_LAZY_MEMBER:
1221 inm->in6m_state = MLD_LAZY_MEMBER;
1222 break;
1223 case MLD_G_QUERY_PENDING_MEMBER:
1224 case MLD_SG_QUERY_PENDING_MEMBER:
1225 case MLD_LEAVING_MEMBER:
1226 break;
1227 }
1228 }
1229
1230 out_locked:
1231 MLD_UNLOCK();
1232 IN6_MULTI_LIST_UNLOCK();
1233
1234 /* XXX Clear embedded scope ID as userland won't expect it. */
1235 in6_clearscope(&mld->mld_addr);
1236
1237 return (0);
1238 }
1239
1240 /*
1241 * MLD input path.
1242 *
1243 * Assume query messages which fit in a single ICMPv6 message header
1244 * have been pulled up.
1245 * Assume that userland will want to see the message, even if it
1246 * otherwise fails kernel input validation; do not free it.
1247 * Pullup may however free the mbuf chain m if it fails.
1248 *
1249 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1250 */
1251 int
1252 mld_input(struct mbuf **mp, int off, int icmp6len)
1253 {
1254 struct ifnet *ifp;
1255 struct ip6_hdr *ip6;
1256 struct mbuf *m;
1257 struct mld_hdr *mld;
1258 int mldlen;
1259
1260 m = *mp;
1261 CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1262
1263 ifp = m->m_pkthdr.rcvif;
1264
1265 /* Pullup to appropriate size. */
1266 if (m->m_len < off + sizeof(*mld)) {
1267 m = m_pullup(m, off + sizeof(*mld));
1268 if (m == NULL) {
1269 ICMP6STAT_INC(icp6s_badlen);
1270 return (IPPROTO_DONE);
1271 }
1272 }
1273 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1274 if (mld->mld_type == MLD_LISTENER_QUERY &&
1275 icmp6len >= sizeof(struct mldv2_query)) {
1276 mldlen = sizeof(struct mldv2_query);
1277 } else {
1278 mldlen = sizeof(struct mld_hdr);
1279 }
1280 if (m->m_len < off + mldlen) {
1281 m = m_pullup(m, off + mldlen);
1282 if (m == NULL) {
1283 ICMP6STAT_INC(icp6s_badlen);
1284 return (IPPROTO_DONE);
1285 }
1286 }
1287 *mp = m;
1288 ip6 = mtod(m, struct ip6_hdr *);
1289 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1290
1291 /*
1292 * Userland needs to see all of this traffic for implementing
1293 * the endpoint discovery portion of multicast routing.
1294 */
1295 switch (mld->mld_type) {
1296 case MLD_LISTENER_QUERY:
1297 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1298 if (icmp6len == sizeof(struct mld_hdr)) {
1299 if (mld_v1_input_query(ifp, ip6, mld) != 0)
1300 return (0);
1301 } else if (icmp6len >= sizeof(struct mldv2_query)) {
1302 if (mld_v2_input_query(ifp, ip6, m,
1303 (struct mldv2_query *)mld, off, icmp6len) != 0)
1304 return (0);
1305 }
1306 break;
1307 case MLD_LISTENER_REPORT:
1308 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1309 if (mld_v1_input_report(ifp, ip6, mld) != 0)
1310 return (0);
1311 break;
1312 case MLDV2_LISTENER_REPORT:
1313 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1314 break;
1315 case MLD_LISTENER_DONE:
1316 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1317 break;
1318 default:
1319 break;
1320 }
1321
1322 return (0);
1323 }
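/*
 * Usage sketch (illustrative): callers are expected to honour the
 * return convention above, e.g. in the ICMPv6 input path:
 *
 *	if (mld_input(&m, off, icmp6len) == IPPROTO_DONE)
 *		return (IPPROTO_DONE);	(mbuf chain was freed)
 *	(otherwise *mp is still valid and may be passed to raw input)
 */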
1324
1325 /*
1326 * Fast timeout handler (global).
1327 * VIMAGE: Timeout handlers are expected to service all vimages.
1328 */
1329 void
1330 mld_fasttimo(void)
1331 {
1332 struct in6_multi_head inmh;
1333 VNET_ITERATOR_DECL(vnet_iter);
1334
1335 SLIST_INIT(&inmh);
1336
1337 VNET_LIST_RLOCK_NOSLEEP();
1338 VNET_FOREACH(vnet_iter) {
1339 CURVNET_SET(vnet_iter);
1340 mld_fasttimo_vnet(&inmh);
1341 CURVNET_RESTORE();
1342 }
1343 VNET_LIST_RUNLOCK_NOSLEEP();
1344 in6m_release_list_deferred(&inmh);
1345 }
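/*
 * Timer granularity: pr_fasttimo runs PR_FASTHZ (5) times per second,
 * so the per-group timers serviced here tick in 200 ms units;
 * pr_slowtimo runs PR_SLOWHZ (2) times per second for the
 * querier-present timers.
 */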
1346
1347 /*
1348 * Fast timeout handler (per-vnet).
1349 *
1350 * VIMAGE: Assume caller has set up our curvnet.
1351 */
1352 static void
1353 mld_fasttimo_vnet(struct in6_multi_head *inmh)
1354 {
1355 struct epoch_tracker et;
1356 struct mbufq scq; /* State-change packets */
1357 struct mbufq qrq; /* Query response packets */
1358 struct ifnet *ifp;
1359 struct mld_ifsoftc *mli;
1360 struct ifmultiaddr *ifma;
1361 struct in6_multi *inm;
1362 int uri_fasthz;
1363
1364 uri_fasthz = 0;
1365
1366 /*
1367 * Quick check to see if any work needs to be done, in order to
1368 * minimize the overhead of fasttimo processing.
1369 * SMPng: XXX Unlocked reads.
1370 */
1371 if (!V_current_state_timers_running6 &&
1372 !V_interface_timers_running6 &&
1373 !V_state_change_timers_running6)
1374 return;
1375
1376 IN6_MULTI_LIST_LOCK();
1377 MLD_LOCK();
1378
1379 /*
1380 * MLDv2 General Query response timer processing.
1381 */
1382 if (V_interface_timers_running6) {
1383 CTR1(KTR_MLD, "%s: interface timers running", __func__);
1384
1385 V_interface_timers_running6 = 0;
1386 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1387 if (mli->mli_v2_timer == 0) {
1388 /* Do nothing. */
1389 } else if (--mli->mli_v2_timer == 0) {
1390 mld_v2_dispatch_general_query(mli);
1391 } else {
1392 V_interface_timers_running6 = 1;
1393 }
1394 }
1395 }
1396
1397 if (!V_current_state_timers_running6 &&
1398 !V_state_change_timers_running6)
1399 goto out_locked;
1400
1401 V_current_state_timers_running6 = 0;
1402 V_state_change_timers_running6 = 0;
1403
1404 CTR1(KTR_MLD, "%s: state change timers running", __func__);
1405
1406 /*
1407 * MLD host report and state-change timer processing.
1408 * Note: Processing a v2 group timer may remove a node.
1409 */
1410 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1411 ifp = mli->mli_ifp;
1412
1413 if (mli->mli_version == MLD_VERSION_2) {
1414 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1415 PR_FASTHZ);
1416 mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
1417 mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
1418 }
1419
1420 NET_EPOCH_ENTER(et);
1421 IF_ADDR_WLOCK(ifp);
1422 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1423 inm = in6m_ifmultiaddr_get_inm(ifma);
1424 if (inm == NULL)
1425 continue;
1426 switch (mli->mli_version) {
1427 case MLD_VERSION_1:
1428 mld_v1_process_group_timer(inmh, inm);
1429 break;
1430 case MLD_VERSION_2:
1431 mld_v2_process_group_timers(inmh, &qrq,
1432 &scq, inm, uri_fasthz);
1433 break;
1434 }
1435 }
1436 IF_ADDR_WUNLOCK(ifp);
1437
1438 switch (mli->mli_version) {
1439 case MLD_VERSION_1:
1440 /*
1441 * Transmit reports for this lifecycle. This
1442 * is done while not holding IF_ADDR_LOCK
1443 * since this can call
1444 * in6ifa_ifpforlinklocal() which locks
1445 * IF_ADDR_LOCK internally as well as
1446 * ip6_output() to transmit a packet.
1447 */
1448 while ((inm = SLIST_FIRST(inmh)) != NULL) {
1449 SLIST_REMOVE_HEAD(inmh, in6m_defer);
1450 (void)mld_v1_transmit_report(inm,
1451 MLD_LISTENER_REPORT);
1452 }
1453 break;
1454 case MLD_VERSION_2:
1455 mld_dispatch_queue(&qrq, 0);
1456 mld_dispatch_queue(&scq, 0);
1457 break;
1458 }
1459 NET_EPOCH_EXIT(et);
1460 }
1461
1462 out_locked:
1463 MLD_UNLOCK();
1464 IN6_MULTI_LIST_UNLOCK();
1465 }
1466
1467 /*
1468 * Update host report group timer.
1469 * Will update the global pending timer flags.
1470 */
1471 static void
1472 mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
1473 {
1474 int report_timer_expired;
1475
1476 IN6_MULTI_LIST_LOCK_ASSERT();
1477 MLD_LOCK_ASSERT();
1478
1479 if (inm->in6m_timer == 0) {
1480 report_timer_expired = 0;
1481 } else if (--inm->in6m_timer == 0) {
1482 report_timer_expired = 1;
1483 } else {
1484 V_current_state_timers_running6 = 1;
1485 return;
1486 }
1487
1488 switch (inm->in6m_state) {
1489 case MLD_NOT_MEMBER:
1490 case MLD_SILENT_MEMBER:
1491 case MLD_IDLE_MEMBER:
1492 case MLD_LAZY_MEMBER:
1493 case MLD_SLEEPING_MEMBER:
1494 case MLD_AWAKENING_MEMBER:
1495 break;
1496 case MLD_REPORTING_MEMBER:
1497 if (report_timer_expired) {
1498 inm->in6m_state = MLD_IDLE_MEMBER;
1499 SLIST_INSERT_HEAD(inmh, inm, in6m_defer);
1500 }
1501 break;
1502 case MLD_G_QUERY_PENDING_MEMBER:
1503 case MLD_SG_QUERY_PENDING_MEMBER:
1504 case MLD_LEAVING_MEMBER:
1505 break;
1506 }
1507 }
1508
1509 /*
1510 * Update a group's timers for MLDv2.
1511 * Will update the global pending timer flags.
1512 * Note: Unlocked read from mli.
1513 */
1514 static void
1515 mld_v2_process_group_timers(struct in6_multi_head *inmh,
1516 struct mbufq *qrq, struct mbufq *scq,
1517 struct in6_multi *inm, const int uri_fasthz)
1518 {
1519 int query_response_timer_expired;
1520 int state_change_retransmit_timer_expired;
1521 #ifdef KTR
1522 char ip6tbuf[INET6_ADDRSTRLEN];
1523 #endif
1524
1525 IN6_MULTI_LIST_LOCK_ASSERT();
1526 MLD_LOCK_ASSERT();
1527
1528 query_response_timer_expired = 0;
1529 state_change_retransmit_timer_expired = 0;
1530
1531 /*
1532 * During a transition from compatibility mode back to MLDv2,
1533 * a group record in REPORTING state may still have its group
1534 * timer active. This is a no-op in this function; it is easier
1535 * to deal with it here than to complicate the slow-timeout path.
1536 */
1537 if (inm->in6m_timer == 0) {
1538 query_response_timer_expired = 0;
1539 } else if (--inm->in6m_timer == 0) {
1540 query_response_timer_expired = 1;
1541 } else {
1542 V_current_state_timers_running6 = 1;
1543 }
1544
1545 if (inm->in6m_sctimer == 0) {
1546 state_change_retransmit_timer_expired = 0;
1547 } else if (--inm->in6m_sctimer == 0) {
1548 state_change_retransmit_timer_expired = 1;
1549 } else {
1550 V_state_change_timers_running6 = 1;
1551 }
1552
1553 /* We are in fasttimo, so be quick about it. */
1554 if (!state_change_retransmit_timer_expired &&
1555 !query_response_timer_expired)
1556 return;
1557
1558 switch (inm->in6m_state) {
1559 case MLD_NOT_MEMBER:
1560 case MLD_SILENT_MEMBER:
1561 case MLD_SLEEPING_MEMBER:
1562 case MLD_LAZY_MEMBER:
1563 case MLD_AWAKENING_MEMBER:
1564 case MLD_IDLE_MEMBER:
1565 break;
1566 case MLD_G_QUERY_PENDING_MEMBER:
1567 case MLD_SG_QUERY_PENDING_MEMBER:
1568 /*
1569 * Respond to a previously pending Group-Specific
1570 * or Group-and-Source-Specific query by enqueueing
1571 * the appropriate Current-State report for
1572 * immediate transmission.
1573 */
1574 if (query_response_timer_expired) {
1575 int retval;
1576
1577 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1578 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
1579 0);
1580 CTR2(KTR_MLD, "%s: enqueue record = %d",
1581 __func__, retval);
1582 inm->in6m_state = MLD_REPORTING_MEMBER;
1583 in6m_clear_recorded(inm);
1584 }
1585 /* FALLTHROUGH */
1586 case MLD_REPORTING_MEMBER:
1587 case MLD_LEAVING_MEMBER:
1588 if (state_change_retransmit_timer_expired) {
1589 /*
1590 * State-change retransmission timer fired.
1591 * If there are any further pending retransmissions,
1592 * set the global pending state-change flag, and
1593 * reset the timer.
1594 */
1595 if (--inm->in6m_scrv > 0) {
1596 inm->in6m_sctimer = uri_fasthz;
1597 V_state_change_timers_running6 = 1;
1598 }
1599 /*
1600 * Retransmit the previously computed state-change
1601 * report. If there are no further pending
1602 * retransmissions, the mbuf queue will be consumed.
1603 * Update T0 state to T1 as we have now sent
1604 * a state-change.
1605 */
1606 (void)mld_v2_merge_state_changes(inm, scq);
1607
1608 in6m_commit(inm);
1609 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
1610 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1611 if_name(inm->in6m_ifp));
1612
1613 /*
1614 * If we are leaving the group for good, make sure
1615 * we release MLD's reference to it.
1616 * This release must be deferred using a SLIST,
1617 * as we are called from a loop which traverses
1618 * the in_ifmultiaddr TAILQ.
1619 */
1620 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1621 inm->in6m_scrv == 0) {
1622 inm->in6m_state = MLD_NOT_MEMBER;
1623 in6m_disconnect_locked(inmh, inm);
1624 in6m_rele_locked(inmh, inm);
1625 }
1626 }
1627 break;
1628 }
1629 }
1630
1631 /*
1632 * Switch to a different version on the given interface,
1633 * as per Section 9.12.
1634 */
1635 static void
1636 mld_set_version(struct mld_ifsoftc *mli, const int version)
1637 {
1638 int old_version_timer;
1639
1640 MLD_LOCK_ASSERT();
1641
1642 CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
1643 version, mli->mli_ifp, if_name(mli->mli_ifp));
1644
1645 if (version == MLD_VERSION_1) {
1646 /*
1647 * Compute the "Older Version Querier Present" timer as per
1648 * Section 9.12.
1649 */
1650 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1651 old_version_timer *= PR_SLOWHZ;
1652 mli->mli_v1_timer = old_version_timer;
1653 }
1654
1655 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1656 mli->mli_version = MLD_VERSION_1;
1657 mld_v2_cancel_link_timers(mli);
1658 }
1659 }
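/*
 * Worked example (using the defaults in this file): RV = MLD_RV_INIT
 * (2), QI = MLD_QI_INIT (125 s) and QRI = MLD_QRI_INIT (10 s) give an
 * Older Version Querier Present timeout of (2 * 125) + 10 = 260
 * seconds, i.e. 520 slowtimo ticks at PR_SLOWHZ.
 */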
1660
1661 /*
1662 * Cancel pending MLDv2 timers for the given link and all groups
1663 * joined on it; state-change, general-query, and group-query timers.
1664 */
1665 static void
1666 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
1667 {
1668 struct epoch_tracker et;
1669 struct in6_multi_head inmh;
1670 struct ifmultiaddr *ifma;
1671 struct ifnet *ifp;
1672 struct in6_multi *inm;
1673
1674 CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
1675 mli->mli_ifp, if_name(mli->mli_ifp));
1676
1677 SLIST_INIT(&inmh);
1678 IN6_MULTI_LIST_LOCK_ASSERT();
1679 MLD_LOCK_ASSERT();
1680
1681 /*
1682 * Fast-track this potentially expensive operation
1683 * by checking all the global 'timer pending' flags.
1684 */
1685 if (!V_interface_timers_running6 &&
1686 !V_state_change_timers_running6 &&
1687 !V_current_state_timers_running6)
1688 return;
1689
1690 mli->mli_v2_timer = 0;
1691
1692 ifp = mli->mli_ifp;
1693
1694 IF_ADDR_WLOCK(ifp);
1695 NET_EPOCH_ENTER(et);
1696 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1697 inm = in6m_ifmultiaddr_get_inm(ifma);
1698 if (inm == NULL)
1699 continue;
1700 switch (inm->in6m_state) {
1701 case MLD_NOT_MEMBER:
1702 case MLD_SILENT_MEMBER:
1703 case MLD_IDLE_MEMBER:
1704 case MLD_LAZY_MEMBER:
1705 case MLD_SLEEPING_MEMBER:
1706 case MLD_AWAKENING_MEMBER:
1707 break;
1708 case MLD_LEAVING_MEMBER:
1709 /*
1710 * If we are leaving the group and switching
1711 * version, we need to release the final
1712 * reference held for issuing the INCLUDE {}.
1713 */
1714 if (inm->in6m_refcount == 1)
1715 in6m_disconnect_locked(&inmh, inm);
1716 in6m_rele_locked(&inmh, inm);
1717 /* FALLTHROUGH */
1718 case MLD_G_QUERY_PENDING_MEMBER:
1719 case MLD_SG_QUERY_PENDING_MEMBER:
1720 in6m_clear_recorded(inm);
1721 /* FALLTHROUGH */
1722 case MLD_REPORTING_MEMBER:
1723 inm->in6m_sctimer = 0;
1724 inm->in6m_timer = 0;
1725 inm->in6m_state = MLD_REPORTING_MEMBER;
1726 /*
1727 * Free any pending MLDv2 state-change records.
1728 */
1729 mbufq_drain(&inm->in6m_scq);
1730 break;
1731 }
1732 }
1733 NET_EPOCH_EXIT(et);
1734 IF_ADDR_WUNLOCK(ifp);
1735 in6m_release_list_deferred(&inmh);
1736 }
1737
1738 /*
1739 * Global slowtimo handler.
1740 * VIMAGE: Timeout handlers are expected to service all vimages.
1741 */
1742 void
1743 mld_slowtimo(void)
1744 {
1745 VNET_ITERATOR_DECL(vnet_iter);
1746
1747 VNET_LIST_RLOCK_NOSLEEP();
1748 VNET_FOREACH(vnet_iter) {
1749 CURVNET_SET(vnet_iter);
1750 mld_slowtimo_vnet();
1751 CURVNET_RESTORE();
1752 }
1753 VNET_LIST_RUNLOCK_NOSLEEP();
1754 }
1755
1756 /*
1757 * Per-vnet slowtimo handler.
1758 */
1759 static void
1760 mld_slowtimo_vnet(void)
1761 {
1762 struct mld_ifsoftc *mli;
1763
1764 MLD_LOCK();
1765
1766 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1767 mld_v1_process_querier_timers(mli);
1768 }
1769
1770 MLD_UNLOCK();
1771 }
1772
1773 /*
1774 * Update the Older Version Querier Present timers for a link.
1775 * See Section 9.12 of RFC 3810.
1776 */
1777 static void
1778 mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
1779 {
1780
1781 MLD_LOCK_ASSERT();
1782
1783 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1784 /*
1785 * MLDv1 Querier Present timer expired; revert to MLDv2.
1786 */
1787 CTR5(KTR_MLD,
1788 "%s: transition from v%d -> v%d on %p(%s)",
1789 __func__, mli->mli_version, MLD_VERSION_2,
1790 mli->mli_ifp, if_name(mli->mli_ifp));
1791 mli->mli_version = MLD_VERSION_2;
1792 }
1793 }
1794
1795 /*
1796 * Transmit an MLDv1 report immediately.
1797 */
1798 static int
1799 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1800 {
1801 struct ifnet *ifp;
1802 struct in6_ifaddr *ia;
1803 struct ip6_hdr *ip6;
1804 struct mbuf *mh, *md;
1805 struct mld_hdr *mld;
1806
1807 NET_EPOCH_ASSERT();
1808 IN6_MULTI_LIST_LOCK_ASSERT();
1809 MLD_LOCK_ASSERT();
1810
1811 ifp = in6m->in6m_ifp;
1812 /* in process of being freed */
1813 if (ifp == NULL)
1814 return (0);
1815 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1816 /* ia may be NULL if link-local address is tentative. */
1817
1818 mh = m_gethdr(M_NOWAIT, MT_DATA);
1819 if (mh == NULL) {
1820 if (ia != NULL)
1821 ifa_free(&ia->ia_ifa);
1822 return (ENOMEM);
1823 }
1824 md = m_get(M_NOWAIT, MT_DATA);
1825 if (md == NULL) {
1826 m_free(mh);
1827 if (ia != NULL)
1828 ifa_free(&ia->ia_ifa);
1829 return (ENOMEM);
1830 }
1831 mh->m_next = md;
1832
1833 /*
1834 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1835 * that ether_output() does not need to allocate another mbuf
1836 * for the header in the most common case.
1837 */
1838 M_ALIGN(mh, sizeof(struct ip6_hdr));
1839 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1840 mh->m_len = sizeof(struct ip6_hdr);
1841
1842 ip6 = mtod(mh, struct ip6_hdr *);
1843 ip6->ip6_flow = 0;
1844 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1845 ip6->ip6_vfc |= IPV6_VERSION;
1846 ip6->ip6_nxt = IPPROTO_ICMPV6;
1847 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1848 ip6->ip6_dst = in6m->in6m_addr;
1849
1850 md->m_len = sizeof(struct mld_hdr);
1851 mld = mtod(md, struct mld_hdr *);
1852 mld->mld_type = type;
1853 mld->mld_code = 0;
1854 mld->mld_cksum = 0;
1855 mld->mld_maxdelay = 0;
1856 mld->mld_reserved = 0;
1857 mld->mld_addr = in6m->in6m_addr;
1858 in6_clearscope(&mld->mld_addr);
1859 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1860 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1861
1862 mld_save_context(mh, ifp);
1863 mh->m_flags |= M_MLDV1;
1864
1865 mld_dispatch_packet(mh);
1866
1867 if (ia != NULL)
1868 ifa_free(&ia->ia_ifa);
1869 return (0);
1870 }
1871
1872 /*
1873 * Process a state change from the upper layer for the given IPv6 group.
1874 *
1875 * Each socket holds a reference on the in_multi in its own ip_moptions.
1876 * The socket layer will have made the necessary updates to the group
1877 * state, it is now up to MLD to issue a state change report if there
1878 * has been any change between T0 (when the last state-change was issued)
1879 * and T1 (now).
1880 *
1881 * We use the MLDv2 state machine at group level. The MLD module
1882 * however makes the decision as to which MLD protocol version to speak.
1883 * A state change *from* INCLUDE {} always means an initial join.
1884 * A state change *to* INCLUDE {} always means a final leave.
1885 *
1886 * If delay is non-zero, and the state change is an initial multicast
1887 * join, the state change report will be delayed by 'delay' ticks
1888 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
1889 * the initial MLDv2 state change report will be delayed by whichever
1890 * is sooner, a pending state-change timer or delay itself.
1891 *
1892 * VIMAGE: curvnet should have been set by caller, as this routine
1893 * is called from the socket option handlers.
1894 */
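/*
 * Example (illustrative): an ASM join of ff02::101 moves the group
 * filter from INCLUDE {} to EXCLUDE {}, which MLD treats as an initial
 * join; dropping the last membership moves it back to INCLUDE {},
 * a final leave.
 */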
1895 int
1896 mld_change_state(struct in6_multi *inm, const int delay)
1897 {
1898 struct mld_ifsoftc *mli;
1899 struct ifnet *ifp;
1900 int error;
1901
1902 IN6_MULTI_LIST_LOCK_ASSERT();
1903
1904 error = 0;
1905
1906 /*
1907 * Check if the in6_multi has already been disconnected.
1908 */
1909 if (inm->in6m_ifp == NULL) {
1910 CTR1(KTR_MLD, "%s: inm is disconnected", __func__);
1911 return (0);
1912 }
1913
1914 /*
1915 * Try to detect if the upper layer just asked us to change state
1916 * for an interface which has now gone away.
1917 */
1918 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
1919 ifp = inm->in6m_ifma->ifma_ifp;
1920 if (ifp == NULL)
1921 return (0);
1922 /*
1923 * Sanity check that netinet6's notion of ifp is the
1924 * same as net's.
1925 */
1926 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
1927
1928 MLD_LOCK();
1929 mli = MLD_IFINFO(ifp);
1930 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1931
1932 /*
1933 * If we detect a state transition to or from MCAST_UNDEFINED
1934 * for this group, then we are starting or finishing an MLD
1935 * life cycle for this group.
1936 */
1937 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
1938 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
1939 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
1940 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
1941 CTR1(KTR_MLD, "%s: initial join", __func__);
1942 error = mld_initial_join(inm, mli, delay);
1943 goto out_locked;
1944 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
1945 CTR1(KTR_MLD, "%s: final leave", __func__);
1946 mld_final_leave(inm, mli);
1947 goto out_locked;
1948 }
1949 } else {
1950 CTR1(KTR_MLD, "%s: filter set change", __func__);
1951 }
1952
1953 error = mld_handle_state_change(inm, mli);
1954
1955 out_locked:
1956 MLD_UNLOCK();
1957 return (error);
1958 }
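
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): as the comment above notes, curvnet must already be set and
 * the in6_multi list lock held when mld_change_state() is entered, as
 * is the case when it is reached from the socket option handlers.
 */
#if 0
static int
example_issue_state_change(struct in6_multi *inm)
{
	int error;

	CURVNET_SET(inm->in6m_ifp->if_vnet); /* normally done by socket layer */
	IN6_MULTI_LIST_LOCK();
	error = mld_change_state(inm, 0);    /* 0: no extra report delay */
	IN6_MULTI_LIST_UNLOCK();
	CURVNET_RESTORE();

	return (error);
}
#endif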
1959
1960 /*
1961 * Perform the initial join for an MLD group.
1962 *
1963 * When joining a group:
1964 * If the group should have its MLD traffic suppressed, do nothing.
1965 * MLDv1 starts sending MLDv1 host membership reports.
1966 * MLDv2 will schedule an MLDv2 state-change report containing the
1967 * initial state of the membership.
1968 *
1969 * If the delay argument is non-zero, then we must delay sending the
1970 * initial state change for delay ticks (in units of PR_FASTHZ).
1971 */
1972 static int
1973 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
1974 const int delay)
1975 {
1976 struct epoch_tracker et;
1977 struct ifnet *ifp;
1978 struct mbufq *mq;
1979 int error, retval, syncstates;
1980 int odelay;
1981 #ifdef KTR
1982 char ip6tbuf[INET6_ADDRSTRLEN];
1983 #endif
1984
1985 CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
1986 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1987 inm->in6m_ifp, if_name(inm->in6m_ifp));
1988
1989 error = 0;
1990 syncstates = 1;
1991
1992 ifp = inm->in6m_ifp;
1993
1994 IN6_MULTI_LIST_LOCK_ASSERT();
1995 MLD_LOCK_ASSERT();
1996
1997 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
1998
1999 /*
2000 * Groups joined on loopback or marked as 'not reported'
2001 * enter the MLD_SILENT_MEMBER state and
2002 * are never reported in any protocol exchanges.
2003 * All other groups enter the appropriate state machine
2004 * for the version in use on this link.
2005 * A link marked as MLIF_SILENT causes MLD to be completely
2006 * disabled for the link.
2007 */
2008 if ((ifp->if_flags & IFF_LOOPBACK) ||
2009 (mli->mli_flags & MLIF_SILENT) ||
2010 !mld_is_addr_reported(&inm->in6m_addr)) {
2011 CTR1(KTR_MLD,
2012 "%s: not kicking state machine for silent group", __func__);
2013 inm->in6m_state = MLD_SILENT_MEMBER;
2014 inm->in6m_timer = 0;
2015 } else {
2016 /*
2017 * Deal with overlapping in6_multi lifecycle.
2018 * If this group was LEAVING, then make sure
2019 * we drop the reference we picked up to keep the
2020 * group around for the final INCLUDE {} enqueue.
2021 */
2022 if (mli->mli_version == MLD_VERSION_2 &&
2023 inm->in6m_state == MLD_LEAVING_MEMBER) {
2024 inm->in6m_refcount--;
2025 MPASS(inm->in6m_refcount > 0);
2026 }
2027 inm->in6m_state = MLD_REPORTING_MEMBER;
2028
2029 switch (mli->mli_version) {
2030 case MLD_VERSION_1:
2031 /*
2032 * If a delay was provided, only use it if
2033 * it is greater than the delay normally
2034 * used for an MLDv1 state change report,
2035 * and delay sending the initial MLDv1 report
2036 * by not transitioning to the IDLE state.
2037 */
2038 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
2039 if (delay) {
2040 inm->in6m_timer = max(delay, odelay);
2041 V_current_state_timers_running6 = 1;
2042 } else {
2043 inm->in6m_state = MLD_IDLE_MEMBER;
2044 NET_EPOCH_ENTER(et);
2045 error = mld_v1_transmit_report(inm,
2046 MLD_LISTENER_REPORT);
2047 NET_EPOCH_EXIT(et);
2048 if (error == 0) {
2049 inm->in6m_timer = odelay;
2050 V_current_state_timers_running6 = 1;
2051 }
2052 }
2053 break;
2054
2055 case MLD_VERSION_2:
2056 /*
2057 * Defer update of T0 to T1, until the first copy
2058 * of the state change has been transmitted.
2059 */
2060 syncstates = 0;
2061
2062 /*
2063 * Immediately enqueue a State-Change Report for
2064 * this interface, freeing any previous reports.
2065 * Don't kick the timers if there is nothing to do,
2066 * or if an error occurred.
2067 */
2068 mq = &inm->in6m_scq;
2069 mbufq_drain(mq);
2070 retval = mld_v2_enqueue_group_record(mq, inm, 1,
2071 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2072 CTR2(KTR_MLD, "%s: enqueue record = %d",
2073 __func__, retval);
2074 if (retval <= 0) {
2075 error = -retval;
2076 break;
2077 }
2078
2079 /*
2080 * Schedule transmission of pending state-change
2081 * report up to RV times for this link. The timer
2082 * will fire at the next mld_fasttimo (~200ms),
2083 * giving us an opportunity to merge the reports.
2084 *
2085 * If a delay was provided to this function, only
2086 * use this delay if sooner than the existing one.
2087 */
2088 KASSERT(mli->mli_rv > 1,
2089 ("%s: invalid robustness %d", __func__,
2090 mli->mli_rv));
2091 inm->in6m_scrv = mli->mli_rv;
2092 if (delay) {
2093 if (inm->in6m_sctimer > 1) {
2094 inm->in6m_sctimer =
2095 min(inm->in6m_sctimer, delay);
2096 } else
2097 inm->in6m_sctimer = delay;
2098 } else
2099 inm->in6m_sctimer = 1;
2100 V_state_change_timers_running6 = 1;
2101
2102 error = 0;
2103 break;
2104 }
2105 }
2106
2107 /*
2108 * Only update the T0 state if state change is atomic,
2109 * i.e. we don't need to wait for a timer to fire before we
2110 * can consider the state change to have been communicated.
2111 */
2112 if (syncstates) {
2113 in6m_commit(inm);
2114 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2115 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2116 if_name(inm->in6m_ifp));
2117 }
2118
2119 return (error);
2120 }
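
/*
 * Timing sketch for the MLDv1 arm above (assuming the stock values
 * MLD_V1_MAX_RI = 10 seconds and PR_FASTHZ = 5 ticks/second): the
 * unsolicited report timer is drawn from [1, 50] fast ticks, i.e. at
 * most ~10 seconds, and a caller-supplied delay can only stretch,
 * never shorten, that window.
 */
#if 0
static int
example_v1_join_timer(const int delay)
{
	int odelay;

	odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ); /* 1..50 */
	return (max(delay, odelay)); /* never sooner than odelay */
}
#endif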
2121
2122 /*
2123 * Issue an intermediate state change during the life-cycle.
2124 */
2125 static int
2126 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
2127 {
2128 struct ifnet *ifp;
2129 int retval;
2130 #ifdef KTR
2131 char ip6tbuf[INET6_ADDRSTRLEN];
2132 #endif
2133
2134 CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
2135 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2136 inm->in6m_ifp, if_name(inm->in6m_ifp));
2137
2138 ifp = inm->in6m_ifp;
2139
2140 IN6_MULTI_LIST_LOCK_ASSERT();
2141 MLD_LOCK_ASSERT();
2142
2143 KASSERT(mli && mli->mli_ifp == ifp,
2144 ("%s: inconsistent ifp", __func__));
2145
2146 if ((ifp->if_flags & IFF_LOOPBACK) ||
2147 (mli->mli_flags & MLIF_SILENT) ||
2148 !mld_is_addr_reported(&inm->in6m_addr) ||
2149 (mli->mli_version != MLD_VERSION_2)) {
2150 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2151 CTR1(KTR_MLD,
2152 "%s: not kicking state machine for silent group", __func__);
2153 }
2154 CTR1(KTR_MLD, "%s: nothing to do", __func__);
2155 in6m_commit(inm);
2156 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2157 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2158 if_name(inm->in6m_ifp));
2159 return (0);
2160 }
2161
2162 mbufq_drain(&inm->in6m_scq);
2163
2164 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2165 (mli->mli_flags & MLIF_USEALLOW));
2166 CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
2167 if (retval <= 0)
2168 return (-retval);
2169
2170 /*
2171 * If record(s) were enqueued, start the state-change
2172 * report timer for this group.
2173 */
2174 inm->in6m_scrv = mli->mli_rv;
2175 inm->in6m_sctimer = 1;
2176 V_state_change_timers_running6 = 1;
2177
2178 return (0);
2179 }
2180
2181 /*
2182 * Perform the final leave for a multicast address.
2183 *
2184 * When leaving a group:
2185 * MLDv1 sends a DONE message, if and only if we are the reporter.
2186 * MLDv2 enqueues a state-change report containing a transition
2187 * to INCLUDE {} for immediate transmission.
2188 */
2189 static void
2190 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
2191 {
2192 struct epoch_tracker et;
2193 int syncstates;
2194 #ifdef KTR
2195 char ip6tbuf[INET6_ADDRSTRLEN];
2196 #endif
2197
2198 syncstates = 1;
2199
2200 CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
2201 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2202 inm->in6m_ifp, if_name(inm->in6m_ifp));
2203
2204 IN6_MULTI_LIST_LOCK_ASSERT();
2205 MLD_LOCK_ASSERT();
2206
2207 switch (inm->in6m_state) {
2208 case MLD_NOT_MEMBER:
2209 case MLD_SILENT_MEMBER:
2210 case MLD_LEAVING_MEMBER:
2211 /* Already leaving or left; do nothing. */
2212 CTR1(KTR_MLD,
2213 "%s: not kicking state machine for silent group", __func__);
2214 break;
2215 case MLD_REPORTING_MEMBER:
2216 case MLD_IDLE_MEMBER:
2217 case MLD_G_QUERY_PENDING_MEMBER:
2218 case MLD_SG_QUERY_PENDING_MEMBER:
2219 if (mli->mli_version == MLD_VERSION_1) {
2220 #ifdef INVARIANTS
2221 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2222 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
2223 panic("%s: MLDv2 state reached, not MLDv2 mode",
2224 __func__);
2225 #endif
2226 NET_EPOCH_ENTER(et);
2227 mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2228 NET_EPOCH_EXIT(et);
2229 inm->in6m_state = MLD_NOT_MEMBER;
2230 V_current_state_timers_running6 = 1;
2231 } else if (mli->mli_version == MLD_VERSION_2) {
2232 /*
2233 * Stop group timer and all pending reports.
2234 * Immediately enqueue a state-change report
2235 * TO_IN {} to be sent on the next fast timeout,
2236 * giving us an opportunity to merge reports.
2237 */
2238 mbufq_drain(&inm->in6m_scq);
2239 inm->in6m_timer = 0;
2240 inm->in6m_scrv = mli->mli_rv;
2241 CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
2242 "pending retransmissions.", __func__,
2243 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2244 if_name(inm->in6m_ifp), inm->in6m_scrv);
2245 if (inm->in6m_scrv == 0) {
2246 inm->in6m_state = MLD_NOT_MEMBER;
2247 inm->in6m_sctimer = 0;
2248 } else {
2249 int retval;
2250
2251 in6m_acquire_locked(inm);
2252
2253 retval = mld_v2_enqueue_group_record(
2254 &inm->in6m_scq, inm, 1, 0, 0,
2255 (mli->mli_flags & MLIF_USEALLOW));
2256 KASSERT(retval != 0,
2257 ("%s: enqueue record = %d", __func__,
2258 retval));
2259
2260 inm->in6m_state = MLD_LEAVING_MEMBER;
2261 inm->in6m_sctimer = 1;
2262 V_state_change_timers_running6 = 1;
2263 syncstates = 0;
2264 }
2265 break;
2266 }
2267 break;
2268 case MLD_LAZY_MEMBER:
2269 case MLD_SLEEPING_MEMBER:
2270 case MLD_AWAKENING_MEMBER:
2271 /* Our reports are suppressed; do nothing. */
2272 break;
2273 }
2274
2275 if (syncstates) {
2276 in6m_commit(inm);
2277 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2278 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2279 if_name(inm->in6m_ifp));
2280 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2281 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
2282 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
2283 }
2284 }
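
/*
 * Retransmission sketch for the MLDv2 leave path above, assuming the
 * default robustness of 2: the TO_IN {} record is enqueued once and
 * then retransmitted by the fast timeout until in6m_scrv copies have
 * been sent, roughly one per 200ms tick (hypothetical trace):
 *
 *   t+0ms    mld_final_leave(): scrv = 2, sctimer = 1, state LEAVING
 *   t+200ms  fast timeout:      transmit copy #1, scrv = 1
 *   t+400ms  fast timeout:      transmit copy #2, scrv = 0, reference
 *                               taken by in6m_acquire_locked() released
 */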
2285
2286 /*
2287 * Enqueue an MLDv2 group record to the given output queue.
2288 *
2289 * If is_state_change is zero, a current-state record is appended.
2290 * If is_state_change is non-zero, a state-change report is appended.
2291 *
2292 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2293 * If is_group_query is zero, and there is a packet with free space
2294 * at the tail of the queue, the record will be appended to it,
2295 * provided there is enough free space.
2296 * Otherwise a new mbuf packet chain is allocated.
2297 *
2298 * If is_source_query is non-zero, each source is checked to see if
2299 * it was recorded for a Group-Source query, and will be omitted if
2300 * it is not both in-mode and recorded.
2301 *
2302 * If use_block_allow is non-zero, state change reports for initial join
2303 * and final leave, on an inclusive mode group with a source list, will be
2304 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2305 *
2306 * The function will attempt to allocate leading space in the packet
2307 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2308 *
2309 * If successful the size of all data appended to the queue is returned,
2310 * otherwise an error code less than zero is returned, or zero if
2311 * no record(s) were appended.
2312 */
2313 static int
2314 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
2315 const int is_state_change, const int is_group_query,
2316 const int is_source_query, const int use_block_allow)
2317 {
2318 struct mldv2_record mr;
2319 struct mldv2_record *pmr;
2320 struct ifnet *ifp;
2321 struct ip6_msource *ims, *nims;
2322 struct mbuf *m0, *m, *md;
2323 int is_filter_list_change;
2324 int minrec0len, m0srcs, msrcs, nbytes, off;
2325 int record_has_sources;
2326 int now;
2327 int type;
2328 uint8_t mode;
2329 #ifdef KTR
2330 char ip6tbuf[INET6_ADDRSTRLEN];
2331 #endif
2332
2333 IN6_MULTI_LIST_LOCK_ASSERT();
2334
2335 ifp = inm->in6m_ifp;
2336 is_filter_list_change = 0;
2337 m = NULL;
2338 m0 = NULL;
2339 m0srcs = 0;
2340 msrcs = 0;
2341 nbytes = 0;
2342 nims = NULL;
2343 record_has_sources = 1;
2344 pmr = NULL;
2345 type = MLD_DO_NOTHING;
2346 mode = inm->in6m_st[1].iss_fmode;
2347
2348 /*
2349 * If we did not transition out of ASM mode during t0->t1,
2350 * and there are no source nodes to process, we can skip
2351 * the generation of source records.
2352 */
2353 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2354 inm->in6m_nsrc == 0)
2355 record_has_sources = 0;
2356
2357 if (is_state_change) {
2358 /*
2359 * Queue a state change record.
2360 * If the mode did not change, and there are non-ASM
2361 * listeners or source filters present,
2362 * we potentially need to issue two records for the group.
2363 * If there are ASM listeners, and there was no filter
2364 * mode transition of any kind, do nothing.
2365 *
2366 * If we are transitioning to MCAST_UNDEFINED, we need
2367 * not send any sources. A transition to/from this state is
2368 * considered inclusive with some special treatment.
2369 *
2370 * If we are rewriting initial joins/leaves to use
2371 * ALLOW/BLOCK, and the group's membership is inclusive,
2372 * we need to send sources in all cases.
2373 */
2374 if (mode != inm->in6m_st[0].iss_fmode) {
2375 if (mode == MCAST_EXCLUDE) {
2376 CTR1(KTR_MLD, "%s: change to EXCLUDE",
2377 __func__);
2378 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2379 } else {
2380 CTR1(KTR_MLD, "%s: change to INCLUDE",
2381 __func__);
2382 if (use_block_allow) {
2383 /*
2384 * XXX
2385 * Here we're interested in state
2386 * edges either direction between
2387 * MCAST_UNDEFINED and MCAST_INCLUDE.
2388 * Perhaps we should just check
2389 * the group state, rather than
2390 * the filter mode.
2391 */
2392 if (mode == MCAST_UNDEFINED) {
2393 type = MLD_BLOCK_OLD_SOURCES;
2394 } else {
2395 type = MLD_ALLOW_NEW_SOURCES;
2396 }
2397 } else {
2398 type = MLD_CHANGE_TO_INCLUDE_MODE;
2399 if (mode == MCAST_UNDEFINED)
2400 record_has_sources = 0;
2401 }
2402 }
2403 } else {
2404 if (record_has_sources) {
2405 is_filter_list_change = 1;
2406 } else {
2407 type = MLD_DO_NOTHING;
2408 }
2409 }
2410 } else {
2411 /*
2412 * Queue a current state record.
2413 */
2414 if (mode == MCAST_EXCLUDE) {
2415 type = MLD_MODE_IS_EXCLUDE;
2416 } else if (mode == MCAST_INCLUDE) {
2417 type = MLD_MODE_IS_INCLUDE;
2418 KASSERT(inm->in6m_st[1].iss_asm == 0,
2419 ("%s: inm %p is INCLUDE but ASM count is %d",
2420 __func__, inm, inm->in6m_st[1].iss_asm));
2421 }
2422 }
2423
2424 /*
2425 * Generate the filter list changes using a separate function.
2426 */
2427 if (is_filter_list_change)
2428 return (mld_v2_enqueue_filter_change(mq, inm));
2429
2430 if (type == MLD_DO_NOTHING) {
2431 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
2432 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2433 if_name(inm->in6m_ifp));
2434 return (0);
2435 }
2436
2437 /*
2438 * If any sources are present, we must be able to fit at least
2439 * one in the trailing space of the tail packet's mbuf,
2440 * ideally more.
2441 */
2442 minrec0len = sizeof(struct mldv2_record);
2443 if (record_has_sources)
2444 minrec0len += sizeof(struct in6_addr);
2445
2446 CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
2447 mld_rec_type_to_str(type),
2448 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2449 if_name(inm->in6m_ifp));
2450
2451 /*
2452 * Check if we have a packet in the tail of the queue for this
2453 * group into which the first group record for this group will fit.
2454 * Otherwise allocate a new packet.
2455 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2456 * Note: Group records for G/GSR query responses MUST be sent
2457 * in their own packet.
2458 */
2459 m0 = mbufq_last(mq);
2460 if (!is_group_query &&
2461 m0 != NULL &&
2462 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2463 (m0->m_pkthdr.len + minrec0len) <
2464 (ifp->if_mtu - MLD_MTUSPACE)) {
2465 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2466 sizeof(struct mldv2_record)) /
2467 sizeof(struct in6_addr);
2468 m = m0;
2469 CTR1(KTR_MLD, "%s: use existing packet", __func__);
2470 } else {
2471 if (mbufq_full(mq)) {
2472 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2473 return (-ENOMEM);
2474 }
2475 m = NULL;
2476 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2477 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2478 if (!is_state_change && !is_group_query)
2479 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2480 if (m == NULL)
2481 m = m_gethdr(M_NOWAIT, MT_DATA);
2482 if (m == NULL)
2483 return (-ENOMEM);
2484
2485 mld_save_context(m, ifp);
2486
2487 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
2488 }
2489
2490 /*
2491 * Append group record.
2492 * If we have sources, we don't know how many yet.
2493 */
2494 mr.mr_type = type;
2495 mr.mr_datalen = 0;
2496 mr.mr_numsrc = 0;
2497 mr.mr_addr = inm->in6m_addr;
2498 in6_clearscope(&mr.mr_addr);
2499 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2500 if (m != m0)
2501 m_freem(m);
2502 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2503 return (-ENOMEM);
2504 }
2505 nbytes += sizeof(struct mldv2_record);
2506
2507 /*
2508 * Append as many sources as will fit in the first packet.
2509 * If we are appending to a new packet, the chain allocation
2510 * may potentially use clusters; use m_getptr() in this case.
2511 * If we are appending to an existing packet, we need to obtain
2512 * a pointer to the group record after m_append(), in case a new
2513 * mbuf was allocated.
2514 *
2515 * Only append sources which are in-mode at t1. If we are
2516 * transitioning to MCAST_UNDEFINED state on the group, and
2517 * use_block_allow is zero, do not include source entries.
2518 * Otherwise, we need to include this source in the report.
2519 *
2520 * Only report recorded sources in our filter set when responding
2521 * to a group-source query.
2522 */
2523 if (record_has_sources) {
2524 if (m == m0) {
2525 md = m_last(m);
2526 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2527 md->m_len - nbytes);
2528 } else {
2529 md = m_getptr(m, 0, &off);
2530 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2531 off);
2532 }
2533 msrcs = 0;
2534 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2535 nims) {
2536 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2537 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2538 now = im6s_get_mode(inm, ims, 1);
2539 CTR2(KTR_MLD, "%s: node is %d", __func__, now);
2540 if ((now != mode) ||
2541 (now == mode &&
2542 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2543 CTR1(KTR_MLD, "%s: skip node", __func__);
2544 continue;
2545 }
2546 if (is_source_query && ims->im6s_stp == 0) {
2547 CTR1(KTR_MLD, "%s: skip unrecorded node",
2548 __func__);
2549 continue;
2550 }
2551 CTR1(KTR_MLD, "%s: append node", __func__);
2552 if (!m_append(m, sizeof(struct in6_addr),
2553 (void *)&ims->im6s_addr)) {
2554 if (m != m0)
2555 m_freem(m);
2556 CTR1(KTR_MLD, "%s: m_append() failed.",
2557 __func__);
2558 return (-ENOMEM);
2559 }
2560 nbytes += sizeof(struct in6_addr);
2561 ++msrcs;
2562 if (msrcs == m0srcs)
2563 break;
2564 }
2565 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
2566 msrcs);
2567 pmr->mr_numsrc = htons(msrcs);
2568 nbytes += (msrcs * sizeof(struct in6_addr));
2569 }
2570
2571 if (is_source_query && msrcs == 0) {
2572 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
2573 if (m != m0)
2574 m_freem(m);
2575 return (0);
2576 }
2577
2578 /*
2579 * We are good to go with first packet.
2580 */
2581 if (m != m0) {
2582 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
2583 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2584 mbufq_enqueue(mq, m);
2585 } else
2586 m->m_pkthdr.PH_vt.vt_nrecs++;
2587
2588 /*
2589 * No further work needed if no source list in packet(s).
2590 */
2591 if (!record_has_sources)
2592 return (nbytes);
2593
2594 /*
2595 * Whilst sources remain to be announced, we need to allocate
2596 * a new packet and fill out as many sources as will fit.
2597 * Always try for a cluster first.
2598 */
2599 while (nims != NULL) {
2600 if (mbufq_full(mq)) {
2601 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2602 return (-ENOMEM);
2603 }
2604 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2605 if (m == NULL)
2606 m = m_gethdr(M_NOWAIT, MT_DATA);
2607 if (m == NULL)
2608 return (-ENOMEM);
2609 mld_save_context(m, ifp);
2610 md = m_getptr(m, 0, &off);
2611 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2612 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
2613
2614 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2615 if (m != m0)
2616 m_freem(m);
2617 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2618 return (-ENOMEM);
2619 }
2620 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2621 nbytes += sizeof(struct mldv2_record);
2622
2623 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2624 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2625
2626 msrcs = 0;
2627 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2628 CTR2(KTR_MLD, "%s: visit node %s",
2629 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2630 now = im6s_get_mode(inm, ims, 1);
2631 if ((now != mode) ||
2632 (now == mode &&
2633 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2634 CTR1(KTR_MLD, "%s: skip node", __func__);
2635 continue;
2636 }
2637 if (is_source_query && ims->im6s_stp == 0) {
2638 CTR1(KTR_MLD, "%s: skip unrecorded node",
2639 __func__);
2640 continue;
2641 }
2642 CTR1(KTR_MLD, "%s: append node", __func__);
2643 if (!m_append(m, sizeof(struct in6_addr),
2644 (void *)&ims->im6s_addr)) {
2645 if (m != m0)
2646 m_freem(m);
2647 CTR1(KTR_MLD, "%s: m_append() failed.",
2648 __func__);
2649 return (-ENOMEM);
2650 }
2651 ++msrcs;
2652 if (msrcs == m0srcs)
2653 break;
2654 }
2655 pmr->mr_numsrc = htons(msrcs);
2656 nbytes += (msrcs * sizeof(struct in6_addr));
2657
2658 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
2659 mbufq_enqueue(mq, m);
2660 }
2661
2662 return (nbytes);
2663 }
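
/*
 * Capacity sketch for the m0srcs computation above, assuming a
 * 1500-byte Ethernet MTU and that MLD_MTUSPACE covers the 40-byte
 * IPv6 header, the 8-byte router-alert hop-by-hop option and the
 * 8-byte report header (56 bytes total):
 *
 *   (1500 - 56 - sizeof(struct mldv2_record)) / sizeof(struct in6_addr)
 *     = (1500 - 56 - 20) / 16
 *     = 89
 *
 * so a freshly allocated packet carries at most 89 sources in its
 * first group record.
 */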
2664
2665 /*
2666 * Type used to mark record pass completion.
2667 * We exploit the fact we can cast to this easily from the
2668 * current filter modes on each ip_msource node.
2669 */
2670 typedef enum {
2671 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2672 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2673 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2674 REC_FULL = REC_ALLOW | REC_BLOCK
2675 } rectype_t;
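
/*
 * The cast relies on the MCAST_* filter-mode constants in
 * <netinet/in.h> (UNDEFINED = 0, INCLUDE = 1, EXCLUDE = 2) lining up
 * with the REC_* encoding above; a compile-time check could make the
 * assumption explicit, e.g.:
 */
#if 0
CTASSERT((rectype_t)MCAST_UNDEFINED == REC_NONE);
CTASSERT((rectype_t)MCAST_INCLUDE == REC_ALLOW);
CTASSERT((rectype_t)MCAST_EXCLUDE == REC_BLOCK);
#endif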
2676
2677 /*
2678 * Enqueue an MLDv2 filter list change to the given output queue.
2679 *
2680 * Source list filter state is held in an RB-tree. When the filter list
2681 * for a group is changed without changing its mode, we need to compute
2682 * the deltas between T0 and T1 for each source in the filter set,
2683 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2684 *
2685 * As we may potentially queue two record types, and the entire R-B tree
2686 * needs to be walked at once, we break this out into its own function
2687 * so we can generate a tightly packed queue of packets.
2688 *
2689 * XXX This could be written to only use one tree walk, although that makes
2690 * serializing into the mbuf chains a bit harder. For now we do two walks
2691 * which makes things easier on us, and it may or may not be harder on
2692 * the L2 cache.
2693 *
2694 * If successful the size of all data appended to the queue is returned,
2695 * otherwise an error code less than zero is returned, or zero if
2696 * no record(s) were appended.
2697 */
2698 static int
2699 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
2700 {
2701 static const int MINRECLEN =
2702 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2703 struct ifnet *ifp;
2704 struct mldv2_record mr;
2705 struct mldv2_record *pmr;
2706 struct ip6_msource *ims, *nims;
2707 struct mbuf *m, *m0, *md;
2708 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2709 int nallow, nblock;
2710 uint8_t mode, now, then;
2711 rectype_t crt, drt, nrt;
2712 #ifdef KTR
2713 char ip6tbuf[INET6_ADDRSTRLEN];
2714 #endif
2715
2716 IN6_MULTI_LIST_LOCK_ASSERT();
2717
2718 if (inm->in6m_nsrc == 0 ||
2719 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2720 return (0);
2721
2722 ifp = inm->in6m_ifp; /* interface */
2723 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
2724 crt = REC_NONE; /* current group record type */
2725 drt = REC_NONE; /* mask of completed group record types */
2726 nrt = REC_NONE; /* record type for current node */
2727 m0srcs = 0; /* # sources which will fit in current mbuf chain */
2728 npbytes = 0; /* # of bytes appended this packet */
2729 nbytes = 0; /* # of bytes appended to group's state-change queue */
2730 rsrcs = 0; /* # sources encoded in current record */
2731 schanged = 0; /* # nodes encoded in overall filter change */
2732 nallow = 0; /* # of source entries in ALLOW_NEW */
2733 nblock = 0; /* # of source entries in BLOCK_OLD */
2734 nims = NULL; /* next tree node pointer */
2735
2736 /*
2737 * Loop once for each possible filter record mode.
2738 * The first kind of source we encounter tells us which
2739 * is the first kind of record we start appending.
2740 * If a node transitioned to UNDEFINED at t1, its mode is treated
2741 * as the inverse of the group's filter mode.
2742 */
2743 while (drt != REC_FULL) {
2744 do {
2745 m0 = mbufq_last(mq);
2746 if (m0 != NULL &&
2747 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2748 MLD_V2_REPORT_MAXRECS) &&
2749 (m0->m_pkthdr.len + MINRECLEN) <
2750 (ifp->if_mtu - MLD_MTUSPACE)) {
2751 m = m0;
2752 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2753 sizeof(struct mldv2_record)) /
2754 sizeof(struct in6_addr);
2755 CTR1(KTR_MLD,
2756 "%s: use previous packet", __func__);
2757 } else {
2758 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2759 if (m == NULL)
2760 m = m_gethdr(M_NOWAIT, MT_DATA);
2761 if (m == NULL) {
2762 CTR1(KTR_MLD,
2763 "%s: m_get*() failed", __func__);
2764 return (-ENOMEM);
2765 }
2766 m->m_pkthdr.PH_vt.vt_nrecs = 0;
2767 mld_save_context(m, ifp);
2768 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2769 sizeof(struct mldv2_record)) /
2770 sizeof(struct in6_addr);
2771 npbytes = 0;
2772 CTR1(KTR_MLD,
2773 "%s: allocated new packet", __func__);
2774 }
2775 /*
2776 * Append the MLD group record header to the
2777 * current packet's data area.
2778 * Recalculate pointer to free space for next
2779 * group record, in case m_append() allocated
2780 * a new mbuf or cluster.
2781 */
2782 memset(&mr, 0, sizeof(mr));
2783 mr.mr_addr = inm->in6m_addr;
2784 in6_clearscope(&mr.mr_addr);
2785 if (!m_append(m, sizeof(mr), (void *)&mr)) {
2786 if (m != m0)
2787 m_freem(m);
2788 CTR1(KTR_MLD,
2789 "%s: m_append() failed", __func__);
2790 return (-ENOMEM);
2791 }
2792 npbytes += sizeof(struct mldv2_record);
2793 if (m != m0) {
2794 /* new packet; offset in chain */
2795 md = m_getptr(m, npbytes -
2796 sizeof(struct mldv2_record), &off);
2797 pmr = (struct mldv2_record *)(mtod(md,
2798 uint8_t *) + off);
2799 } else {
2800 /* current packet; offset from last append */
2801 md = m_last(m);
2802 pmr = (struct mldv2_record *)(mtod(md,
2803 uint8_t *) + md->m_len -
2804 sizeof(struct mldv2_record));
2805 }
2806 /*
2807 * Begin walking the tree for this record type
2808 * pass, or continue from where we left off
2809 * previously if we had to allocate a new packet.
2810 * Only report deltas in-mode at t1.
2811 * We need not report included sources as allowed
2812 * if we are in inclusive mode on the group,
2813 * however the converse is not true.
2814 */
2815 rsrcs = 0;
2816 if (nims == NULL) {
2817 nims = RB_MIN(ip6_msource_tree,
2818 &inm->in6m_srcs);
2819 }
2820 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2821 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2822 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2823 now = im6s_get_mode(inm, ims, 1);
2824 then = im6s_get_mode(inm, ims, 0);
2825 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
2826 __func__, then, now);
2827 if (now == then) {
2828 CTR1(KTR_MLD,
2829 "%s: skip unchanged", __func__);
2830 continue;
2831 }
2832 if (mode == MCAST_EXCLUDE &&
2833 now == MCAST_INCLUDE) {
2834 CTR1(KTR_MLD,
2835 "%s: skip IN src on EX group",
2836 __func__);
2837 continue;
2838 }
2839 nrt = (rectype_t)now;
2840 if (nrt == REC_NONE)
2841 nrt = (rectype_t)(~mode & REC_FULL);
2842 if (schanged++ == 0) {
2843 crt = nrt;
2844 } else if (crt != nrt)
2845 continue;
2846 if (!m_append(m, sizeof(struct in6_addr),
2847 (void *)&ims->im6s_addr)) {
2848 if (m != m0)
2849 m_freem(m);
2850 CTR1(KTR_MLD,
2851 "%s: m_append() failed", __func__);
2852 return (-ENOMEM);
2853 }
2854 nallow += !!(crt == REC_ALLOW);
2855 nblock += !!(crt == REC_BLOCK);
2856 if (++rsrcs == m0srcs)
2857 break;
2858 }
2859 /*
2860 * If we did not append any tree nodes on this
2861 * pass, back out of allocations.
2862 */
2863 if (rsrcs == 0) {
2864 npbytes -= sizeof(struct mldv2_record);
2865 if (m != m0) {
2866 CTR1(KTR_MLD,
2867 "%s: m_free(m)", __func__);
2868 m_freem(m);
2869 } else {
2870 CTR1(KTR_MLD,
2871 "%s: m_adj(m, -mr)", __func__);
2872 m_adj(m, -((int)sizeof(
2873 struct mldv2_record)));
2874 }
2875 continue;
2876 }
2877 npbytes += (rsrcs * sizeof(struct in6_addr));
2878 if (crt == REC_ALLOW)
2879 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2880 else if (crt == REC_BLOCK)
2881 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2882 pmr->mr_numsrc = htons(rsrcs);
2883 /*
2884 * Count the new group record, and enqueue this
2885 * packet if it wasn't already queued.
2886 */
2887 m->m_pkthdr.PH_vt.vt_nrecs++;
2888 if (m != m0)
2889 mbufq_enqueue(mq, m);
2890 nbytes += npbytes;
2891 } while (nims != NULL);
2892 drt |= crt;
2893 crt = (~crt & REC_FULL);
2894 }
2895
2896 CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2897 nallow, nblock);
2898
2899 return (nbytes);
2900 }
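
/*
 * Pass-ordering sketch for the outer loop above: whichever record
 * type the first changed source selects is completed first, then its
 * complement, after which drt == REC_FULL and the loop exits.
 * For example, if an ALLOW delta is seen first:
 *
 *   pass 1: crt = REC_ALLOW,  drt = REC_NONE  -> REC_ALLOW
 *           crt = ~REC_ALLOW & REC_FULL = REC_BLOCK
 *   pass 2: crt = REC_BLOCK,  drt = REC_ALLOW -> REC_FULL
 */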
2901
2902 static int
2903 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
2904 {
2905 struct mbufq *gq;
2906 struct mbuf *m; /* pending state-change */
2907 struct mbuf *m0; /* copy of pending state-change */
2908 struct mbuf *mt; /* last state-change in packet */
2909 int docopy, domerge;
2910 u_int recslen;
2911
2912 docopy = 0;
2913 domerge = 0;
2914 recslen = 0;
2915
2916 IN6_MULTI_LIST_LOCK_ASSERT();
2917 MLD_LOCK_ASSERT();
2918
2919 /*
2920 * If there are further pending retransmissions, make a writable
2921 * copy of each queued state-change message before merging.
2922 */
2923 if (inm->in6m_scrv > 0)
2924 docopy = 1;
2925
2926 gq = &inm->in6m_scq;
2927 #ifdef KTR
2928 if (mbufq_first(gq) == NULL) {
2929 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
2930 __func__, inm);
2931 }
2932 #endif
2933
2934 m = mbufq_first(gq);
2935 while (m != NULL) {
2936 /*
2937 * Only merge the report into the current packet if
2938 * there is sufficient space to do so; an MLDv2 report
2939 * packet may only contain 65,535 group records.
2940 * Always use a simple mbuf chain concatenation to do this,
2941 * as large state changes for single groups may have
2942 * allocated clusters.
2943 */
2944 domerge = 0;
2945 mt = mbufq_last(scq);
2946 if (mt != NULL) {
2947 recslen = m_length(m, NULL);
2948
2949 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
2950 m->m_pkthdr.PH_vt.vt_nrecs <=
2951 MLD_V2_REPORT_MAXRECS) &&
2952 (mt->m_pkthdr.len + recslen <=
2953 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
2954 domerge = 1;
2955 }
2956
2957 if (!domerge && mbufq_full(gq)) {
2958 CTR2(KTR_MLD,
2959 "%s: outbound queue full, skipping whole packet %p",
2960 __func__, m);
2961 mt = m->m_nextpkt;
2962 if (!docopy)
2963 m_freem(m);
2964 m = mt;
2965 continue;
2966 }
2967
2968 if (!docopy) {
2969 CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
2970 m0 = mbufq_dequeue(gq);
2971 m = m0->m_nextpkt;
2972 } else {
2973 CTR2(KTR_MLD, "%s: copying %p", __func__, m);
2974 m0 = m_dup(m, M_NOWAIT);
2975 if (m0 == NULL)
2976 return (ENOMEM);
2977 m0->m_nextpkt = NULL;
2978 m = m->m_nextpkt;
2979 }
2980
2981 if (!domerge) {
2982 CTR3(KTR_MLD, "%s: queueing %p to scq %p",
2983 __func__, m0, scq);
2984 mbufq_enqueue(scq, m0);
2985 } else {
2986 struct mbuf *mtl; /* last mbuf of packet mt */
2987
2988 CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p",
2989 __func__, m0, mt);
2990
2991 mtl = m_last(mt);
2992 m0->m_flags &= ~M_PKTHDR;
2993 mt->m_pkthdr.len += recslen;
2994 mt->m_pkthdr.PH_vt.vt_nrecs +=
2995 m0->m_pkthdr.PH_vt.vt_nrecs;
2996
2997 mtl->m_next = m0;
2998 }
2999 }
3000
3001 return (0);
3002 }
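
/*
 * Minimal sketch (hypothetical helper, not part of the original file)
 * of the merge admission test used above: a pending state-change may
 * be folded into the tail packet only if both the record count and
 * the byte length stay within bounds.
 */
#if 0
static bool
example_can_merge(struct mbuf *mt, struct mbuf *m, u_int mtu)
{
	u_int recslen = m_length(m, NULL);

	return (mt->m_pkthdr.PH_vt.vt_nrecs + m->m_pkthdr.PH_vt.vt_nrecs <=
	    MLD_V2_REPORT_MAXRECS &&
	    mt->m_pkthdr.len + recslen <= mtu - MLD_MTUSPACE);
}
#endif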
3003
3004 /*
3005 * Respond to a pending MLDv2 General Query.
3006 */
3007 static void
3008 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
3009 {
3010 struct ifmultiaddr *ifma;
3011 struct ifnet *ifp;
3012 struct in6_multi *inm;
3013 int retval;
3014
3015 NET_EPOCH_ASSERT();
3016 IN6_MULTI_LIST_LOCK_ASSERT();
3017 MLD_LOCK_ASSERT();
3018
3019 KASSERT(mli->mli_version == MLD_VERSION_2,
3020 ("%s: called when version %d", __func__, mli->mli_version));
3021
3022 /*
3023 * Check that there are some packets queued. If so, send them first.
3024 * For a large number of groups, the reply to a general query can
3025 * take many packets; we should finish sending them before starting
3026 * to queue a new reply.
3027 */
3028 if (mbufq_len(&mli->mli_gq) != 0)
3029 goto send;
3030
3031 ifp = mli->mli_ifp;
3032
3033 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3034 inm = in6m_ifmultiaddr_get_inm(ifma);
3035 if (inm == NULL)
3036 continue;
3037 KASSERT(ifp == inm->in6m_ifp,
3038 ("%s: inconsistent ifp", __func__));
3039
3040 switch (inm->in6m_state) {
3041 case MLD_NOT_MEMBER:
3042 case MLD_SILENT_MEMBER:
3043 break;
3044 case MLD_REPORTING_MEMBER:
3045 case MLD_IDLE_MEMBER:
3046 case MLD_LAZY_MEMBER:
3047 case MLD_SLEEPING_MEMBER:
3048 case MLD_AWAKENING_MEMBER:
3049 inm->in6m_state = MLD_REPORTING_MEMBER;
3050 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3051 inm, 0, 0, 0, 0);
3052 CTR2(KTR_MLD, "%s: enqueue record = %d",
3053 __func__, retval);
3054 break;
3055 case MLD_G_QUERY_PENDING_MEMBER:
3056 case MLD_SG_QUERY_PENDING_MEMBER:
3057 case MLD_LEAVING_MEMBER:
3058 break;
3059 }
3060 }
3061
3062 send:
3063 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3064
3065 /*
3066 * Slew transmission of bursts over 500ms intervals.
3067 */
3068 if (mbufq_first(&mli->mli_gq) != NULL) {
3069 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3070 MLD_RESPONSE_BURST_INTERVAL);
3071 V_interface_timers_running6 = 1;
3072 }
3073 }
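
/*
 * Pacing sketch: if the reply to a general query needs more packets
 * than one burst allows, the remainder stays on mli_gq and the
 * interface timer is re-armed with a small random slew (hypothetical
 * trace, assuming a 4-packet burst limit):
 *
 *   timeout #1: dispatch 4 packets, 6 left -> re-arm mli_v2_timer
 *   timeout #2: dispatch 4 packets, 2 left -> re-arm mli_v2_timer
 *   timeout #3: dispatch 2 packets, queue empty -> timer left idle
 */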
3074
3075 /*
3076 * Transmit the next pending message in the output queue.
3077 *
3078 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3079 * MRT: Nothing needs to be done, as MLD traffic is always local to
3080 * a link and uses a link-scope multicast address.
3081 */
3082 static void
3083 mld_dispatch_packet(struct mbuf *m)
3084 {
3085 struct ip6_moptions im6o;
3086 struct ifnet *ifp;
3087 struct ifnet *oifp;
3088 struct mbuf *m0;
3089 struct mbuf *md;
3090 struct ip6_hdr *ip6;
3091 struct mld_hdr *mld;
3092 int error;
3093 int off;
3094 int type;
3095 uint32_t ifindex;
3096
3097 CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
3098 NET_EPOCH_ASSERT();
3099
3100 /*
3101 * Set VNET image pointer from enqueued mbuf chain
3102 * before doing anything else. Whilst we use interface
3103 * indexes to guard against interface detach, they are
3104 * unique to each VIMAGE and must be retrieved.
3105 */
3106 ifindex = mld_restore_context(m);
3107
3108 /*
3109 * Check if the ifnet still exists. This limits the scope of
3110 * any race in the absence of a global ifp lock for low cost
3111 * (an array lookup).
3112 */
3113 ifp = ifnet_byindex(ifindex);
3114 if (ifp == NULL) {
3115 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
3116 __func__, m, ifindex);
3117 m_freem(m);
3118 IP6STAT_INC(ip6s_noroute);
3119 goto out;
3120 }
3121
3122 im6o.im6o_multicast_hlim = 1;
3123 im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
3124 im6o.im6o_multicast_ifp = ifp;
3125
3126 if (m->m_flags & M_MLDV1) {
3127 m0 = m;
3128 } else {
3129 m0 = mld_v2_encap_report(ifp, m);
3130 if (m0 == NULL) {
3131 CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
3132 IP6STAT_INC(ip6s_odropped);
3133 goto out;
3134 }
3135 }
3136
3137 mld_scrub_context(m0);
3138 m_clrprotoflags(m);
3139 m0->m_pkthdr.rcvif = V_loif;
3140
3141 ip6 = mtod(m0, struct ip6_hdr *);
3142 #if 0
3143 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
3144 #else
3145 /*
3146 * XXX XXX Break some KPI rules to prevent an LOR which would
3147 * occur if we called in6_setscope() at transmission.
3148 * See comments at top of file.
3149 */
3150 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3151 #endif
3152
3153 /*
3154 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3155 * so we can bump the stats.
3156 */
3157 md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3158 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3159 type = mld->mld_type;
3160
3161 oifp = NULL;
3162 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
3163 &oifp, NULL);
3164 if (error) {
3165 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
3166 goto out;
3167 }
3168 ICMP6STAT_INC(icp6s_outhist[type]);
3169 if (oifp != NULL) {
3170 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3171 switch (type) {
3172 case MLD_LISTENER_REPORT:
3173 case MLDV2_LISTENER_REPORT:
3174 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3175 break;
3176 case MLD_LISTENER_DONE:
3177 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3178 break;
3179 }
3180 }
3181 out:
3182 return;
3183 }
3184
3185 /*
3186 * Encapsulate an MLDv2 report.
3187 *
3188 * KAME IPv6 requires that hop-by-hop options be passed separately,
3189 * and that the IPv6 header be prepended in a separate mbuf.
3190 *
3191 * Returns a pointer to the new mbuf chain head, or NULL if the
3192 * allocation failed.
3193 */
3194 static struct mbuf *
3195 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3196 {
3197 struct mbuf *mh;
3198 struct mldv2_report *mld;
3199 struct ip6_hdr *ip6;
3200 struct in6_ifaddr *ia;
3201 int mldreclen;
3202
3203 KASSERT(ifp != NULL, ("%s: null ifp", __func__));
3204 KASSERT((m->m_flags & M_PKTHDR),
3205 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3206
3207 /*
3208 * RFC3590: OK to send as :: or tentative during DAD.
3209 */
3210 NET_EPOCH_ASSERT();
3211 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3212 if (ia == NULL)
3213 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
3214
3215 mh = m_gethdr(M_NOWAIT, MT_DATA);
3216 if (mh == NULL) {
3217 if (ia != NULL)
3218 ifa_free(&ia->ia_ifa);
3219 m_freem(m);
3220 return (NULL);
3221 }
3222 M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3223
3224 mldreclen = m_length(m, NULL);
3225 CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
3226
3227 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3228 mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3229 sizeof(struct mldv2_report) + mldreclen;
3230
3231 ip6 = mtod(mh, struct ip6_hdr *);
3232 ip6->ip6_flow = 0;
3233 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3234 ip6->ip6_vfc |= IPV6_VERSION;
3235 ip6->ip6_nxt = IPPROTO_ICMPV6;
3236 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3237 if (ia != NULL)
3238 ifa_free(&ia->ia_ifa);
3239 ip6->ip6_dst = in6addr_linklocal_allv2routers;
3240 /* scope ID will be set in netisr */
3241
3242 mld = (struct mldv2_report *)(ip6 + 1);
3243 mld->mld_type = MLDV2_LISTENER_REPORT;
3244 mld->mld_code = 0;
3245 mld->mld_cksum = 0;
3246 mld->mld_v2_reserved = 0;
3247 mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3248 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3249
3250 mh->m_next = m;
3251 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3252 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3253 return (mh);
3254 }
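
/*
 * Resulting chain layout (the report header is 8 bytes; each group
 * record is 20 bytes plus 16 per source):
 *
 *   mh: [ ip6_hdr (40) | mldv2_report (8) ]
 *   m:  [ mldv2_record | sources ... ] [ ... ]
 *
 * The hop-by-hop router-alert option is not part of this chain; it
 * is supplied separately via mld_po when ip6_output() is called (see
 * mld_init() below).
 */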
3255
3256 #ifdef KTR
3257 static char *
3258 mld_rec_type_to_str(const int type)
3259 {
3260
3261 switch (type) {
3262 case MLD_CHANGE_TO_EXCLUDE_MODE:
3263 return "TO_EX";
3264 break;
3265 case MLD_CHANGE_TO_INCLUDE_MODE:
3266 return "TO_IN";
3267 break;
3268 case MLD_MODE_IS_EXCLUDE:
3269 return "MODE_EX";
3270 break;
3271 case MLD_MODE_IS_INCLUDE:
3272 return "MODE_IN";
3273 break;
3274 case MLD_ALLOW_NEW_SOURCES:
3275 return "ALLOW_NEW";
3276 break;
3277 case MLD_BLOCK_OLD_SOURCES:
3278 return "BLOCK_OLD";
3279 break;
3280 default:
3281 break;
3282 }
3283 return "unknown";
3284 }
3285 #endif
3286
3287 static void
3288 mld_init(void *unused __unused)
3289 {
3290
3291 CTR1(KTR_MLD, "%s: initializing", __func__);
3292 MLD_LOCK_INIT();
3293
3294 ip6_initpktopts(&mld_po);
3295 mld_po.ip6po_hlim = 1;
3296 mld_po.ip6po_hbh = &mld_ra.hbh;
3297 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3298 mld_po.ip6po_flags = IP6PO_DONTFRAG;
3299 }
3300 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
3301
3302 static void
3303 mld_uninit(void *unused __unused)
3304 {
3305
3306 CTR1(KTR_MLD, "%s: tearing down", __func__);
3307 MLD_LOCK_DESTROY();
3308 }
3309 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
3310
3311 static void
3312 vnet_mld_init(const void *unused __unused)
3313 {
3314
3315 CTR1(KTR_MLD, "%s: initializing", __func__);
3316
3317 LIST_INIT(&V_mli_head);
3318 }
3319 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
3320 NULL);
3321
3322 static void
3323 vnet_mld_uninit(const void *unused __unused)
3324 {
3325
3326 /* This can happen if we shutdown the network stack. */
3327 CTR1(KTR_MLD, "%s: tearing down", __func__);
3328 }
3329 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
3330 NULL);
3331
3332 static int
3333 mld_modevent(module_t mod, int type, void *unused __unused)
3334 {
3335
3336 switch (type) {
3337 case MOD_LOAD:
3338 case MOD_UNLOAD:
3339 break;
3340 default:
3341 return (EOPNOTSUPP);
3342 }
3343 return (0);
3344 }
3345
3346 static moduledata_t mld_mod = {
3347 "mld",
3348 mld_modevent,
3349 0
3350 };
3351 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);