FreeBSD/Linux Kernel Cross Reference
sys/netinet6/mld6.c
1 /*-
2 * Copyright (c) 2009 Bruce Simpson.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote
13 * products derived from this software without specific prior written
14 * permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
29 */
30
31 /*-
32 * Copyright (c) 1988 Stephen Deering.
33 * Copyright (c) 1992, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * Stephen Deering of Stanford University.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
64 */
65
66 #include <sys/cdefs.h>
67 __FBSDID("$FreeBSD: releng/11.0/sys/netinet6/mld6.c 302054 2016-06-21 13:48:49Z bz $");
68
69 #include "opt_inet.h"
70 #include "opt_inet6.h"
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/mbuf.h>
75 #include <sys/socket.h>
76 #include <sys/protosw.h>
77 #include <sys/sysctl.h>
78 #include <sys/kernel.h>
79 #include <sys/callout.h>
80 #include <sys/malloc.h>
81 #include <sys/module.h>
82 #include <sys/ktr.h>
83
84 #include <net/if.h>
85 #include <net/if_var.h>
86 #include <net/route.h>
87 #include <net/vnet.h>
88
89 #include <netinet/in.h>
90 #include <netinet/in_var.h>
91 #include <netinet6/in6_var.h>
92 #include <netinet/ip6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet6/scope6_var.h>
95 #include <netinet/icmp6.h>
96 #include <netinet6/mld6.h>
97 #include <netinet6/mld6_var.h>
98
99 #include <security/mac/mac_framework.h>
100
101 #ifndef KTR_MLD
102 #define KTR_MLD KTR_INET6
103 #endif
104
105 static struct mld_ifsoftc *
106 mli_alloc_locked(struct ifnet *);
107 static void mli_delete_locked(const struct ifnet *);
108 static void mld_dispatch_packet(struct mbuf *);
109 static void mld_dispatch_queue(struct mbufq *, int);
110 static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
111 static void mld_fasttimo_vnet(void);
112 static int mld_handle_state_change(struct in6_multi *,
113 struct mld_ifsoftc *);
114 static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
115 const int);
116 #ifdef KTR
117 static char * mld_rec_type_to_str(const int);
118 #endif
119 static void mld_set_version(struct mld_ifsoftc *, const int);
120 static void mld_slowtimo_vnet(void);
121 static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
122 /*const*/ struct mld_hdr *);
123 static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
124 /*const*/ struct mld_hdr *);
125 static void mld_v1_process_group_timer(struct mld_ifsoftc *,
126 struct in6_multi *);
127 static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
128 static int mld_v1_transmit_report(struct in6_multi *, const int);
129 static void mld_v1_update_group(struct in6_multi *, const int);
130 static void mld_v2_cancel_link_timers(struct mld_ifsoftc *);
131 static void mld_v2_dispatch_general_query(struct mld_ifsoftc *);
132 static struct mbuf *
133 mld_v2_encap_report(struct ifnet *, struct mbuf *);
134 static int mld_v2_enqueue_filter_change(struct mbufq *,
135 struct in6_multi *);
136 static int mld_v2_enqueue_group_record(struct mbufq *,
137 struct in6_multi *, const int, const int, const int,
138 const int);
139 static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
140 struct mbuf *, const int, const int);
141 static int mld_v2_merge_state_changes(struct in6_multi *,
142 struct mbufq *);
143 static void mld_v2_process_group_timers(struct mld_ifsoftc *,
144 struct mbufq *, struct mbufq *,
145 struct in6_multi *, const int);
146 static int mld_v2_process_group_query(struct in6_multi *,
147 struct mld_ifsoftc *mli, int, struct mbuf *, const int);
148 static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
149 static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
150
151 /*
152 * Normative references: RFC 2710, RFC 3590, RFC 3810.
153 *
154 * Locking:
155 * * The MLD subsystem lock ends up being system-wide for the moment,
156 * but could be per-VIMAGE later on.
157 * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
158 * Any may be taken independently; if any are held at the same
159 * time, the above lock order must be followed.
 160  *  * IN6_MULTI_LOCK covers in6_multi.
161 * * MLD_LOCK covers per-link state and any global variables in this file.
162 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
163 * per-link state iterators.
164 *
165 * XXX LOR PREVENTION
166 * A special case for IPv6 is the in6_setscope() routine. ip6_output()
167 * will not accept an ifp; it wants an embedded scope ID, unlike
168 * ip_output(), which happily takes the ifp given to it. The embedded
169 * scope ID is only used by MLD to select the outgoing interface.
170 *
171 * During interface attach and detach, MLD will take MLD_LOCK *after*
172 * the IF_AFDATA_LOCK.
173 * As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
174 * it with MLD_LOCK held without triggering an LOR. A netisr with indirect
175 * dispatch could work around this, but we'd rather not do that, as it
176 * can introduce other races.
177 *
178 * As such, we exploit the fact that the scope ID is just the interface
179 * index, and embed it in the IPv6 destination address accordingly.
180 * This is potentially NOT VALID for MLDv1 reports, as they
181 * are always sent to the multicast group itself; as MLDv2
182 * reports are always sent to ff02::16, this is not an issue
183 * when MLDv2 is in use.
184 *
185 * This does not however eliminate the LOR when ip6_output() itself
186 * calls in6_setscope() internally whilst MLD_LOCK is held. This will
 187  * trigger an LOR warning in WITNESS when the ifnet is detached.
188 *
189 * The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
190 * how it's used across the network stack. Here we're simply exploiting
191 * the fact that MLD runs at a similar layer in the stack to scope6.c.
192 *
193 * VIMAGE:
194 * * Each in6_multi corresponds to an ifp, and each ifp corresponds
195 * to a vnet in ifp->if_vnet.
196 */
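/*
 * Illustrative sketch of the documented lock order: a path which needs
 * all three locks must take them as follows, and release them in
 * reverse, e.g.:
 *
 *	IN6_MULTI_LOCK();
 *	MLD_LOCK();
 *	IF_ADDR_RLOCK(ifp);
 *	... walk ifp->if_multiaddrs and update per-group MLD state ...
 *	IF_ADDR_RUNLOCK(ifp);
 *	MLD_UNLOCK();
 *	IN6_MULTI_UNLOCK();
 *
 * mld_v1_input_query() below follows exactly this pattern.
 */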
197 static struct mtx mld_mtx;
198 static MALLOC_DEFINE(M_MLD, "mld", "mld state");
199
200 #define MLD_EMBEDSCOPE(pin6, zoneid) \
201 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
202 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
203 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
204
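/*
 * Example (illustrative): embedding an interface index as the KAME
 * scope ID in a link-scope destination before handing it to
 * ip6_output(), per the LOR discussion above:
 *
 *	struct in6_addr dst = in6addr_linklocal_allnodes;  (ff02::1)
 *	MLD_EMBEDSCOPE(&dst, ifp->if_index);
 *
 * dst is now ff02:<if_index>::1, and ip6_output() can derive the
 * outgoing interface from the embedded scope ID alone.
 */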
205 /*
206 * VIMAGE-wide globals.
207 */
208 static VNET_DEFINE(struct timeval, mld_gsrdelay) = {10, 0};
209 static VNET_DEFINE(LIST_HEAD(, mld_ifsoftc), mli_head);
210 static VNET_DEFINE(int, interface_timers_running6);
211 static VNET_DEFINE(int, state_change_timers_running6);
212 static VNET_DEFINE(int, current_state_timers_running6);
213
214 #define V_mld_gsrdelay VNET(mld_gsrdelay)
215 #define V_mli_head VNET(mli_head)
216 #define V_interface_timers_running6 VNET(interface_timers_running6)
217 #define V_state_change_timers_running6 VNET(state_change_timers_running6)
218 #define V_current_state_timers_running6 VNET(current_state_timers_running6)
219
220 SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
221
222 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
223 "IPv6 Multicast Listener Discovery");
224
225 /*
226 * Virtualized sysctls.
227 */
228 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
229 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
230 &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
231 "Rate limit for MLDv2 Group-and-Source queries in seconds");
232
233 /*
234 * Non-virtualized sysctls.
235 */
236 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
237 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
238 "Per-interface MLDv2 state");
239
240 static int mld_v1enable = 1;
241 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN,
242 &mld_v1enable, 0, "Enable fallback to MLDv1");
243
244 static int mld_use_allow = 1;
245 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
246 &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
247
248 /*
249 * Packed Router Alert option structure declaration.
250 */
251 struct mld_raopt {
252 struct ip6_hbh hbh;
253 struct ip6_opt pad;
254 struct ip6_opt_router ra;
255 } __packed;
256
257 /*
258 * Router Alert hop-by-hop option header.
259 */
260 static struct mld_raopt mld_ra = {
261 .hbh = { 0, 0 },
262 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
263 .ra = {
264 .ip6or_type = IP6OPT_ROUTER_ALERT,
265 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
266 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
267 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
268 }
269 };
270 static struct ip6_pktopts mld_po;
271
272 static __inline void
273 mld_save_context(struct mbuf *m, struct ifnet *ifp)
274 {
275
276 #ifdef VIMAGE
277 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
278 #endif /* VIMAGE */
279 m->m_pkthdr.flowid = ifp->if_index;
280 }
281
282 static __inline void
283 mld_scrub_context(struct mbuf *m)
284 {
285
286 m->m_pkthdr.PH_loc.ptr = NULL;
287 m->m_pkthdr.flowid = 0;
288 }
289
290 /*
291 * Restore context from a queued output chain.
292 * Return saved ifindex.
293 *
294 * VIMAGE: The assertion is there to make sure that we
295 * actually called CURVNET_SET() with what's in the mbuf chain.
296 */
297 static __inline uint32_t
298 mld_restore_context(struct mbuf *m)
299 {
300
301 #if defined(VIMAGE) && defined(INVARIANTS)
302 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
303 ("%s: called when curvnet was not restored: cuvnet %p m ptr %p",
304 __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
305 #endif
306 return (m->m_pkthdr.flowid);
307 }
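/*
 * Sketch of the round trip these helpers support (illustrative):
 *
 *	mld_save_context(m, ifp);		stash vnet + ifindex
 *	mbufq_enqueue(&mli->mli_gq, m);		defer transmission
 *	...
 *	later, with curvnet set by the timer's VNET_FOREACH loop:
 *	ifindex = mld_restore_context(m);	asserts curvnet matches
 *	ifp = ifnet_byindex(ifindex);
 *
 * mld_dispatch_packet() performs the second half of this dance.
 */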
308
309 /*
310 * Retrieve or set threshold between group-source queries in seconds.
311 *
312 * VIMAGE: Assume curvnet set by caller.
313 * SMPng: NOTE: Serialized by MLD lock.
314 */
315 static int
316 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
317 {
318 int error;
319 int i;
320
321 error = sysctl_wire_old_buffer(req, sizeof(int));
322 if (error)
323 return (error);
324
325 MLD_LOCK();
326
327 i = V_mld_gsrdelay.tv_sec;
328
329 error = sysctl_handle_int(oidp, &i, 0, req);
330 if (error || !req->newptr)
331 goto out_locked;
332
333 if (i < -1 || i >= 60) {
334 error = EINVAL;
335 goto out_locked;
336 }
337
338 CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
339 V_mld_gsrdelay.tv_sec, i);
340 V_mld_gsrdelay.tv_sec = i;
341
342 out_locked:
343 MLD_UNLOCK();
344 return (error);
345 }
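/*
 * Example: the handler above backs the net.inet6.mld.gsrdelay OID, so
 * the throttle may be raised from its default of 10 seconds with e.g.:
 *
 *	# sysctl net.inet6.mld.gsrdelay=20
 *
 * Values outside [-1, 59] are rejected with EINVAL by the range check
 * above.
 */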
346
347 /*
348 * Expose struct mld_ifsoftc to userland, keyed by ifindex.
349 * For use by ifmcstat(8).
350 *
351 * SMPng: NOTE: Does an unlocked ifindex space read.
352 * VIMAGE: Assume curvnet set by caller. The node handler itself
353 * is not directly virtualized.
354 */
355 static int
356 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
357 {
358 int *name;
359 int error;
360 u_int namelen;
361 struct ifnet *ifp;
362 struct mld_ifsoftc *mli;
363
364 name = (int *)arg1;
365 namelen = arg2;
366
367 if (req->newptr != NULL)
368 return (EPERM);
369
370 if (namelen != 1)
371 return (EINVAL);
372
373 error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
374 if (error)
375 return (error);
376
377 IN6_MULTI_LOCK();
378 MLD_LOCK();
379
380 if (name[0] <= 0 || name[0] > V_if_index) {
381 error = ENOENT;
382 goto out_locked;
383 }
384
385 error = ENOENT;
386
387 ifp = ifnet_byindex(name[0]);
388 if (ifp == NULL)
389 goto out_locked;
390
391 LIST_FOREACH(mli, &V_mli_head, mli_link) {
392 if (ifp == mli->mli_ifp) {
393 struct mld_ifinfo info;
394
395 info.mli_version = mli->mli_version;
396 info.mli_v1_timer = mli->mli_v1_timer;
397 info.mli_v2_timer = mli->mli_v2_timer;
398 info.mli_flags = mli->mli_flags;
399 info.mli_rv = mli->mli_rv;
400 info.mli_qi = mli->mli_qi;
401 info.mli_qri = mli->mli_qri;
402 info.mli_uri = mli->mli_uri;
403 error = SYSCTL_OUT(req, &info, sizeof(info));
404 break;
405 }
406 }
407
408 out_locked:
409 MLD_UNLOCK();
410 IN6_MULTI_UNLOCK();
411 return (error);
412 }
413
414 /*
415 * Dispatch an entire queue of pending packet chains.
416 * VIMAGE: Assumes the vnet pointer has been set.
417 */
418 static void
419 mld_dispatch_queue(struct mbufq *mq, int limit)
420 {
421 struct mbuf *m;
422
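	/*
	 * Note: a limit of 0 effectively means "no limit": the counter
	 * is decremented before the comparison below, so it never
	 * reaches zero on that path and the queue is fully drained.
	 */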
423 while ((m = mbufq_dequeue(mq)) != NULL) {
 424 		CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, mq);
425 mld_dispatch_packet(m);
426 if (--limit == 0)
427 break;
428 }
429 }
430
431 /*
432 * Filter outgoing MLD report state by group.
433 *
 434  * Reports are ALWAYS suppressed for the all-nodes group (ff02::1)
435 * and node-local addresses. However, kernel and socket consumers
436 * always embed the KAME scope ID in the address provided, so strip it
437 * when performing comparison.
438 * Note: This is not the same as the *multicast* scope.
439 *
440 * Return zero if the given group is one for which MLD reports
441 * should be suppressed, or non-zero if reports should be issued.
442 */
443 static __inline int
444 mld_is_addr_reported(const struct in6_addr *addr)
445 {
446
447 KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
448
449 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
450 return (0);
451
452 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
453 struct in6_addr tmp = *addr;
454 in6_clearscope(&tmp);
455 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
456 return (0);
457 }
458
459 return (1);
460 }
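/*
 * Examples of the policy implemented above:
 *	ff01::1 (node-local all-nodes)		not reported (0)
 *	ff02::1 (link-local all-nodes)		not reported (0)
 *	ff02::1:ff0e:8c6c (solicited-node)	reported (1)
 *	ff0e::101 (global-scope group)		reported (1)
 */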
461
462 /*
463 * Attach MLD when PF_INET6 is attached to an interface.
464 *
465 * SMPng: Normally called with IF_AFDATA_LOCK held.
466 */
467 struct mld_ifsoftc *
468 mld_domifattach(struct ifnet *ifp)
469 {
470 struct mld_ifsoftc *mli;
471
472 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
473 __func__, ifp, if_name(ifp));
474
475 MLD_LOCK();
476
477 mli = mli_alloc_locked(ifp);
478 if (!(ifp->if_flags & IFF_MULTICAST))
479 mli->mli_flags |= MLIF_SILENT;
480 if (mld_use_allow)
481 mli->mli_flags |= MLIF_USEALLOW;
482
483 MLD_UNLOCK();
484
485 return (mli);
486 }
487
488 /*
489 * VIMAGE: assume curvnet set by caller.
490 */
491 static struct mld_ifsoftc *
492 mli_alloc_locked(/*const*/ struct ifnet *ifp)
493 {
494 struct mld_ifsoftc *mli;
495
496 MLD_LOCK_ASSERT();
497
498 mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
499 if (mli == NULL)
500 goto out;
501
502 mli->mli_ifp = ifp;
503 mli->mli_version = MLD_VERSION_2;
504 mli->mli_flags = 0;
505 mli->mli_rv = MLD_RV_INIT;
506 mli->mli_qi = MLD_QI_INIT;
507 mli->mli_qri = MLD_QRI_INIT;
508 mli->mli_uri = MLD_URI_INIT;
509 SLIST_INIT(&mli->mli_relinmhead);
510 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
511
512 LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
513
514 CTR2(KTR_MLD, "allocate mld_ifsoftc for ifp %p(%s)",
515 ifp, if_name(ifp));
516
517 out:
518 return (mli);
519 }
520
521 /*
522 * Hook for ifdetach.
523 *
524 * NOTE: Some finalization tasks need to run before the protocol domain
525 * is detached, but also before the link layer does its cleanup.
526 * Run before link-layer cleanup; cleanup groups, but do not free MLD state.
527 *
528 * SMPng: Caller must hold IN6_MULTI_LOCK().
529 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
530 * XXX This routine is also bitten by unlocked ifma_protospec access.
531 */
532 void
533 mld_ifdetach(struct ifnet *ifp)
534 {
535 struct mld_ifsoftc *mli;
536 struct ifmultiaddr *ifma;
537 struct in6_multi *inm, *tinm;
538
539 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
540 if_name(ifp));
541
542 IN6_MULTI_LOCK_ASSERT();
543 MLD_LOCK();
544
545 mli = MLD_IFINFO(ifp);
546 if (mli->mli_version == MLD_VERSION_2) {
547 IF_ADDR_RLOCK(ifp);
548 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
549 if (ifma->ifma_addr->sa_family != AF_INET6 ||
550 ifma->ifma_protospec == NULL)
551 continue;
552 inm = (struct in6_multi *)ifma->ifma_protospec;
553 if (inm->in6m_state == MLD_LEAVING_MEMBER) {
554 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
555 inm, in6m_nrele);
556 }
557 in6m_clear_recorded(inm);
558 }
559 IF_ADDR_RUNLOCK(ifp);
560 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
561 tinm) {
562 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
563 in6m_release_locked(inm);
564 }
565 }
566
567 MLD_UNLOCK();
568 }
569
570 /*
571 * Hook for domifdetach.
572 * Runs after link-layer cleanup; free MLD state.
573 *
574 * SMPng: Normally called with IF_AFDATA_LOCK held.
575 */
576 void
577 mld_domifdetach(struct ifnet *ifp)
578 {
579
580 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
581 __func__, ifp, if_name(ifp));
582
583 MLD_LOCK();
584 mli_delete_locked(ifp);
585 MLD_UNLOCK();
586 }
587
588 static void
589 mli_delete_locked(const struct ifnet *ifp)
590 {
591 struct mld_ifsoftc *mli, *tmli;
592
593 CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
594 __func__, ifp, if_name(ifp));
595
596 MLD_LOCK_ASSERT();
597
598 LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
599 if (mli->mli_ifp == ifp) {
600 /*
601 * Free deferred General Query responses.
602 */
603 mbufq_drain(&mli->mli_gq);
604
605 LIST_REMOVE(mli, mli_link);
606
607 KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
608 ("%s: there are dangling in_multi references",
609 __func__));
610
611 free(mli, M_MLD);
612 return;
613 }
614 }
615 }
616
617 /*
618 * Process a received MLDv1 general or address-specific query.
619 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
620 *
621 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
622 * mld_addr. This is OK as we own the mbuf chain.
623 */
624 static int
625 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
626 /*const*/ struct mld_hdr *mld)
627 {
628 struct ifmultiaddr *ifma;
629 struct mld_ifsoftc *mli;
630 struct in6_multi *inm;
631 int is_general_query;
632 uint16_t timer;
633 #ifdef KTR
634 char ip6tbuf[INET6_ADDRSTRLEN];
635 #endif
636
637 is_general_query = 0;
638
639 if (!mld_v1enable) {
640 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
641 ip6_sprintf(ip6tbuf, &mld->mld_addr),
642 ifp, if_name(ifp));
643 return (0);
644 }
645
646 /*
647 * RFC3810 Section 6.2: MLD queries must originate from
648 * a router's link-local address.
649 */
650 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
651 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
652 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
653 ifp, if_name(ifp));
654 return (0);
655 }
656
657 /*
658 * Do address field validation upfront before we accept
659 * the query.
660 */
661 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
662 /*
663 * MLDv1 General Query.
664 * If this was not sent to the all-nodes group, ignore it.
665 */
666 struct in6_addr dst;
667
668 dst = ip6->ip6_dst;
669 in6_clearscope(&dst);
670 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
671 return (EINVAL);
672 is_general_query = 1;
673 } else {
674 /*
675 * Embed scope ID of receiving interface in MLD query for
676 * lookup whilst we don't hold other locks.
677 */
678 in6_setscope(&mld->mld_addr, ifp, NULL);
679 }
680
681 IN6_MULTI_LOCK();
682 MLD_LOCK();
683
684 /*
685 * Switch to MLDv1 host compatibility mode.
686 */
687 mli = MLD_IFINFO(ifp);
688 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
689 mld_set_version(mli, MLD_VERSION_1);
690
691 timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
692 if (timer == 0)
693 timer = 1;
694
695 IF_ADDR_RLOCK(ifp);
696 if (is_general_query) {
697 /*
698 * For each reporting group joined on this
699 * interface, kick the report timer.
700 */
701 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
702 ifp, if_name(ifp));
703 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
704 if (ifma->ifma_addr->sa_family != AF_INET6 ||
705 ifma->ifma_protospec == NULL)
706 continue;
707 inm = (struct in6_multi *)ifma->ifma_protospec;
708 mld_v1_update_group(inm, timer);
709 }
710 } else {
711 /*
712 * MLDv1 Group-Specific Query.
713 * If this is a group-specific MLDv1 query, we need only
714 * look up the single group to process it.
715 */
716 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
717 if (inm != NULL) {
718 CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
719 ip6_sprintf(ip6tbuf, &mld->mld_addr),
720 ifp, if_name(ifp));
721 mld_v1_update_group(inm, timer);
722 }
723 /* XXX Clear embedded scope ID as userland won't expect it. */
724 in6_clearscope(&mld->mld_addr);
725 }
726
727 IF_ADDR_RUNLOCK(ifp);
728 MLD_UNLOCK();
729 IN6_MULTI_UNLOCK();
730
731 return (0);
732 }
733
734 /*
735 * Update the report timer on a group in response to an MLDv1 query.
736 *
737 * If we are becoming the reporting member for this group, start the timer.
738 * If we already are the reporting member for this group, and timer is
739 * below the threshold, reset it.
740 *
741 * We may be updating the group for the first time since we switched
742 * to MLDv2. If we are, then we must clear any recorded source lists,
743 * and transition to REPORTING state; the group timer is overloaded
744 * for group and group-source query responses.
745 *
746 * Unlike MLDv2, the delay per group should be jittered
747 * to avoid bursts of MLDv1 reports.
748 */
749 static void
750 mld_v1_update_group(struct in6_multi *inm, const int timer)
751 {
752 #ifdef KTR
753 char ip6tbuf[INET6_ADDRSTRLEN];
754 #endif
755
756 CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
757 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
758 if_name(inm->in6m_ifp), timer);
759
760 IN6_MULTI_LOCK_ASSERT();
761
762 switch (inm->in6m_state) {
763 case MLD_NOT_MEMBER:
764 case MLD_SILENT_MEMBER:
765 break;
766 case MLD_REPORTING_MEMBER:
767 if (inm->in6m_timer != 0 &&
768 inm->in6m_timer <= timer) {
769 CTR1(KTR_MLD, "%s: REPORTING and timer running, "
770 "skipping.", __func__);
771 break;
772 }
773 /* FALLTHROUGH */
774 case MLD_SG_QUERY_PENDING_MEMBER:
775 case MLD_G_QUERY_PENDING_MEMBER:
776 case MLD_IDLE_MEMBER:
777 case MLD_LAZY_MEMBER:
778 case MLD_AWAKENING_MEMBER:
779 CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
780 inm->in6m_state = MLD_REPORTING_MEMBER;
781 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
782 V_current_state_timers_running6 = 1;
783 break;
784 case MLD_SLEEPING_MEMBER:
785 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
786 inm->in6m_state = MLD_AWAKENING_MEMBER;
787 break;
788 case MLD_LEAVING_MEMBER:
789 break;
790 }
791 }
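/*
 * Illustration, assuming MLD_RANDOM_DELAY(x) in mld6_var.h draws
 * uniformly from [1, x]: a query advertising a 10000 ms maximum delay
 * yields timer = (10000 * PR_FASTHZ) / MLD_TIMER_SCALE = 50 fast ticks
 * (PR_FASTHZ == 5), and each group above then picks an independent
 * report delay of 1..50 ticks, spreading MLDv1 reports across the
 * response interval.
 */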
792
793 /*
794 * Process a received MLDv2 general, group-specific or
795 * group-and-source-specific query.
796 *
797 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
798 *
799 * Return 0 if successful, otherwise an appropriate error code is returned.
800 */
801 static int
802 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
803 struct mbuf *m, const int off, const int icmp6len)
804 {
805 struct mld_ifsoftc *mli;
806 struct mldv2_query *mld;
807 struct in6_multi *inm;
808 uint32_t maxdelay, nsrc, qqi;
809 int is_general_query;
810 uint16_t timer;
811 uint8_t qrv;
812 #ifdef KTR
813 char ip6tbuf[INET6_ADDRSTRLEN];
814 #endif
815
816 is_general_query = 0;
817
818 /*
819 * RFC3810 Section 6.2: MLD queries must originate from
820 * a router's link-local address.
821 */
822 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
823 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
824 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
825 ifp, if_name(ifp));
826 return (0);
827 }
828
829 CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
830
831 mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
832
833 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
834 if (maxdelay >= 32768) {
835 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
836 (MLD_MRC_EXP(maxdelay) + 3);
837 }
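	/*
	 * Worked example of the decoding above (RFC 3810, Section
	 * 5.1.3): an on-wire Maximum Response Code of 0x8400 has
	 * exp = (0x8400 >> 12) & 0x7 = 0 and mant = 0x8400 & 0xfff =
	 * 0x400, giving (0x400 | 0x1000) << (0 + 3) = 40960 ms.
	 * The QQIC field handled below uses an analogous
	 * mantissa/exponent scheme on an 8-bit field.
	 */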
838 timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
839 if (timer == 0)
840 timer = 1;
841
842 qrv = MLD_QRV(mld->mld_misc);
843 if (qrv < 2) {
844 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
845 qrv, MLD_RV_INIT);
846 qrv = MLD_RV_INIT;
847 }
848
849 qqi = mld->mld_qqi;
850 if (qqi >= 128) {
851 qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
852 (MLD_QQIC_EXP(mld->mld_qqi) + 3);
853 }
854
855 nsrc = ntohs(mld->mld_numsrc);
856 if (nsrc > MLD_MAX_GS_SOURCES)
857 return (EMSGSIZE);
858 if (icmp6len < sizeof(struct mldv2_query) +
859 (nsrc * sizeof(struct in6_addr)))
860 return (EMSGSIZE);
861
862 /*
863 * Do further input validation upfront to avoid resetting timers
864 * should we need to discard this query.
865 */
866 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
867 /*
868 * A general query with a source list has undefined
869 * behaviour; discard it.
870 */
871 if (nsrc > 0)
872 return (EINVAL);
873 is_general_query = 1;
874 } else {
875 /*
876 * Embed scope ID of receiving interface in MLD query for
877 * lookup whilst we don't hold other locks (due to KAME
878 * locking lameness). We own this mbuf chain just now.
879 */
880 in6_setscope(&mld->mld_addr, ifp, NULL);
881 }
882
883 IN6_MULTI_LOCK();
884 MLD_LOCK();
885
886 mli = MLD_IFINFO(ifp);
887 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
888
889 /*
890 * Discard the v2 query if we're in Compatibility Mode.
891 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
892 * until the Old Version Querier Present timer expires.
893 */
894 if (mli->mli_version != MLD_VERSION_2)
895 goto out_locked;
896
897 mld_set_version(mli, MLD_VERSION_2);
898 mli->mli_rv = qrv;
899 mli->mli_qi = qqi;
900 mli->mli_qri = maxdelay;
901
902 CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
903 maxdelay);
904
905 if (is_general_query) {
906 /*
907 * MLDv2 General Query.
908 *
909 * Schedule a current-state report on this ifp for
910 * all groups, possibly containing source lists.
911 *
912 * If there is a pending General Query response
913 * scheduled earlier than the selected delay, do
914 * not schedule any other reports.
915 * Otherwise, reset the interface timer.
916 */
917 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
918 ifp, if_name(ifp));
919 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
920 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
921 V_interface_timers_running6 = 1;
922 }
923 } else {
924 /*
925 * MLDv2 Group-specific or Group-and-source-specific Query.
926 *
927 * Group-source-specific queries are throttled on
928 * a per-group basis to defeat denial-of-service attempts.
929 * Queries for groups we are not a member of on this
930 * link are simply ignored.
931 */
932 IF_ADDR_RLOCK(ifp);
933 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
934 if (inm == NULL) {
935 IF_ADDR_RUNLOCK(ifp);
936 goto out_locked;
937 }
938 if (nsrc > 0) {
939 if (!ratecheck(&inm->in6m_lastgsrtv,
940 &V_mld_gsrdelay)) {
941 CTR1(KTR_MLD, "%s: GS query throttled.",
942 __func__);
943 IF_ADDR_RUNLOCK(ifp);
944 goto out_locked;
945 }
946 }
947 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
948 ifp, if_name(ifp));
949 /*
950 * If there is a pending General Query response
951 * scheduled sooner than the selected delay, no
952 * further report need be scheduled.
953 * Otherwise, prepare to respond to the
954 * group-specific or group-and-source query.
955 */
956 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
957 mld_v2_process_group_query(inm, mli, timer, m, off);
958
959 /* XXX Clear embedded scope ID as userland won't expect it. */
960 in6_clearscope(&mld->mld_addr);
961 IF_ADDR_RUNLOCK(ifp);
962 }
963
964 out_locked:
965 MLD_UNLOCK();
966 IN6_MULTI_UNLOCK();
967
968 return (0);
969 }
970
971 /*
972 * Process a received MLDv2 group-specific or group-and-source-specific
973 * query.
974 * Return <0 if any error occurred. Currently this is ignored.
975 */
976 static int
977 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
978 int timer, struct mbuf *m0, const int off)
979 {
980 struct mldv2_query *mld;
981 int retval;
982 uint16_t nsrc;
983
984 IN6_MULTI_LOCK_ASSERT();
985 MLD_LOCK_ASSERT();
986
987 retval = 0;
988 mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
989
990 switch (inm->in6m_state) {
991 case MLD_NOT_MEMBER:
992 case MLD_SILENT_MEMBER:
993 case MLD_SLEEPING_MEMBER:
994 case MLD_LAZY_MEMBER:
995 case MLD_AWAKENING_MEMBER:
996 case MLD_IDLE_MEMBER:
997 case MLD_LEAVING_MEMBER:
998 return (retval);
1000 case MLD_REPORTING_MEMBER:
1001 case MLD_G_QUERY_PENDING_MEMBER:
1002 case MLD_SG_QUERY_PENDING_MEMBER:
1003 break;
1004 }
1005
1006 nsrc = ntohs(mld->mld_numsrc);
1007
1008 /*
1009 * Deal with group-specific queries upfront.
1010 * If any group query is already pending, purge any recorded
1011 * source-list state if it exists, and schedule a query response
1012 * for this group-specific query.
1013 */
1014 if (nsrc == 0) {
1015 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1016 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1017 in6m_clear_recorded(inm);
1018 timer = min(inm->in6m_timer, timer);
1019 }
1020 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1021 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1022 V_current_state_timers_running6 = 1;
1023 return (retval);
1024 }
1025
1026 /*
1027 * Deal with the case where a group-and-source-specific query has
1028 * been received but a group-specific query is already pending.
1029 */
1030 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1031 timer = min(inm->in6m_timer, timer);
1032 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1033 V_current_state_timers_running6 = 1;
1034 return (retval);
1035 }
1036
1037 /*
1038 * Finally, deal with the case where a group-and-source-specific
1039 * query has been received, where a response to a previous g-s-r
1040 * query exists, or none exists.
1041 * In this case, we need to parse the source-list which the Querier
1042 * has provided us with and check if we have any source list filter
1043  * entries at T1 for these sources. If we do not, there is no need
1044  * to schedule a report and the query may be dropped.
1045 * If we do, we must record them and schedule a current-state
1046 * report for those sources.
1047 */
1048 if (inm->in6m_nsrc > 0) {
1049 struct mbuf *m;
1050 uint8_t *sp;
1051 int i, nrecorded;
1052 int soff;
1053
1054 m = m0;
1055 soff = off + sizeof(struct mldv2_query);
1056 nrecorded = 0;
1057 for (i = 0; i < nsrc; i++) {
1058 sp = mtod(m, uint8_t *) + soff;
1059 retval = in6m_record_source(inm,
1060 (const struct in6_addr *)sp);
1061 if (retval < 0)
1062 break;
1063 nrecorded += retval;
1064 soff += sizeof(struct in6_addr);
1065 if (soff >= m->m_len) {
1066 soff = soff - m->m_len;
1067 m = m->m_next;
1068 if (m == NULL)
1069 break;
1070 }
1071 }
1072 if (nrecorded > 0) {
1073 CTR1(KTR_MLD,
1074 "%s: schedule response to SG query", __func__);
1075 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1076 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1077 V_current_state_timers_running6 = 1;
1078 }
1079 }
1080
1081 return (retval);
1082 }
1083
1084 /*
1085 * Process a received MLDv1 host membership report.
1086 * Assumes mld points to mld_hdr in pulled up mbuf chain.
1087 *
1088 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1089 * mld_addr. This is OK as we own the mbuf chain.
1090 */
1091 static int
1092 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1093 /*const*/ struct mld_hdr *mld)
1094 {
1095 struct in6_addr src, dst;
1096 struct in6_ifaddr *ia;
1097 struct in6_multi *inm;
1098 #ifdef KTR
1099 char ip6tbuf[INET6_ADDRSTRLEN];
1100 #endif
1101
1102 if (!mld_v1enable) {
1103 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
1104 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1105 ifp, if_name(ifp));
1106 return (0);
1107 }
1108
1109 if (ifp->if_flags & IFF_LOOPBACK)
1110 return (0);
1111
1112 /*
1113 * MLDv1 reports must originate from a host's link-local address,
1114 * or the unspecified address (when booting).
1115 */
1116 src = ip6->ip6_src;
1117 in6_clearscope(&src);
1118 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1119 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
1120 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
1121 ifp, if_name(ifp));
1122 return (EINVAL);
1123 }
1124
1125 /*
1126 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1127 * group, and must be directed to the group itself.
1128 */
1129 dst = ip6->ip6_dst;
1130 in6_clearscope(&dst);
1131 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1132 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1133 CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)",
1134 ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
1135 ifp, if_name(ifp));
1136 return (EINVAL);
1137 }
1138
1139 /*
1140 * Make sure we don't hear our own membership report, as fast
1141 * leave requires knowing that we are the only member of a
1142 * group. Assume we used the link-local address if available,
1143 * otherwise look for ::.
1144 *
1145 * XXX Note that scope ID comparison is needed for the address
1146 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1147 * performed for the on-wire address.
1148 */
1149 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1150 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
1151 (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
1152 if (ia != NULL)
1153 ifa_free(&ia->ia_ifa);
1154 return (0);
1155 }
1156 if (ia != NULL)
1157 ifa_free(&ia->ia_ifa);
1158
1159 CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
1160 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1161
1162 /*
1163 * Embed scope ID of receiving interface in MLD query for lookup
1164 * whilst we don't hold other locks (due to KAME locking lameness).
1165 */
1166 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1167 in6_setscope(&mld->mld_addr, ifp, NULL);
1168
1169 IN6_MULTI_LOCK();
1170 MLD_LOCK();
1171 IF_ADDR_RLOCK(ifp);
1172
1173 /*
1174 * MLDv1 report suppression.
1175 * If we are a member of this group, and our membership should be
1176 * reported, and our group timer is pending or about to be reset,
1177 * stop our group timer by transitioning to the 'lazy' state.
1178 */
1179 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
1180 if (inm != NULL) {
1181 struct mld_ifsoftc *mli;
1182
1183 mli = inm->in6m_mli;
1184 KASSERT(mli != NULL,
1185 ("%s: no mli for ifp %p", __func__, ifp));
1186
1187 /*
1188 * If we are in MLDv2 host mode, do not allow the
1189 * other host's MLDv1 report to suppress our reports.
1190 */
1191 if (mli->mli_version == MLD_VERSION_2)
1192 goto out_locked;
1193
1194 inm->in6m_timer = 0;
1195
1196 switch (inm->in6m_state) {
1197 case MLD_NOT_MEMBER:
1198 case MLD_SILENT_MEMBER:
1199 case MLD_SLEEPING_MEMBER:
1200 break;
1201 case MLD_REPORTING_MEMBER:
1202 case MLD_IDLE_MEMBER:
1203 case MLD_AWAKENING_MEMBER:
1204 CTR3(KTR_MLD,
1205 "report suppressed for %s on ifp %p(%s)",
1206 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1207 ifp, if_name(ifp));
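			/* FALLTHROUGH */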
1208 case MLD_LAZY_MEMBER:
1209 inm->in6m_state = MLD_LAZY_MEMBER;
1210 break;
1211 case MLD_G_QUERY_PENDING_MEMBER:
1212 case MLD_SG_QUERY_PENDING_MEMBER:
1213 case MLD_LEAVING_MEMBER:
1214 break;
1215 }
1216 }
1217
1218 out_locked:
1219 IF_ADDR_RUNLOCK(ifp);
1220 MLD_UNLOCK();
1221 IN6_MULTI_UNLOCK();
1222
1223 /* XXX Clear embedded scope ID as userland won't expect it. */
1224 in6_clearscope(&mld->mld_addr);
1225
1226 return (0);
1227 }
1228
1229 /*
1230 * MLD input path.
1231 *
1232 * Assume query messages which fit in a single ICMPv6 message header
1233 * have been pulled up.
1234 * Assume that userland will want to see the message, even if it
1235 * otherwise fails kernel input validation; do not free it.
1236 * Pullup may however free the mbuf chain m if it fails.
1237 *
1238 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1239 */
1240 int
1241 mld_input(struct mbuf *m, int off, int icmp6len)
1242 {
1243 struct ifnet *ifp;
1244 struct ip6_hdr *ip6;
1245 struct mld_hdr *mld;
1246 int mldlen;
1247
1248 CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1249
1250 ifp = m->m_pkthdr.rcvif;
1251
1252 ip6 = mtod(m, struct ip6_hdr *);
1253
1254 /* Pullup to appropriate size. */
1255 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1256 if (mld->mld_type == MLD_LISTENER_QUERY &&
1257 icmp6len >= sizeof(struct mldv2_query)) {
1258 mldlen = sizeof(struct mldv2_query);
1259 } else {
1260 mldlen = sizeof(struct mld_hdr);
1261 }
1262 IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1263 if (mld == NULL) {
1264 ICMP6STAT_INC(icp6s_badlen);
1265 return (IPPROTO_DONE);
1266 }
1267
1268 /*
1269 * Userland needs to see all of this traffic for implementing
1270 * the endpoint discovery portion of multicast routing.
1271 */
1272 switch (mld->mld_type) {
1273 case MLD_LISTENER_QUERY:
1274 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1275 if (icmp6len == sizeof(struct mld_hdr)) {
1276 if (mld_v1_input_query(ifp, ip6, mld) != 0)
1277 return (0);
1278 } else if (icmp6len >= sizeof(struct mldv2_query)) {
1279 if (mld_v2_input_query(ifp, ip6, m, off,
1280 icmp6len) != 0)
1281 return (0);
1282 }
1283 break;
1284 case MLD_LISTENER_REPORT:
1285 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1286 if (mld_v1_input_report(ifp, ip6, mld) != 0)
1287 return (0);
1288 break;
1289 case MLDV2_LISTENER_REPORT:
1290 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1291 break;
1292 case MLD_LISTENER_DONE:
1293 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1294 break;
1295 default:
1296 break;
1297 }
1298
1299 return (0);
1300 }
1301
1302 /*
1303 * Fast timeout handler (global).
1304 * VIMAGE: Timeout handlers are expected to service all vimages.
1305 */
1306 void
1307 mld_fasttimo(void)
1308 {
1309 VNET_ITERATOR_DECL(vnet_iter);
1310
1311 VNET_LIST_RLOCK_NOSLEEP();
1312 VNET_FOREACH(vnet_iter) {
1313 CURVNET_SET(vnet_iter);
1314 mld_fasttimo_vnet();
1315 CURVNET_RESTORE();
1316 }
1317 VNET_LIST_RUNLOCK_NOSLEEP();
1318 }
1319
1320 /*
1321 * Fast timeout handler (per-vnet).
1322 *
1323 * VIMAGE: Assume caller has set up our curvnet.
1324 */
1325 static void
1326 mld_fasttimo_vnet(void)
1327 {
1328 struct mbufq scq; /* State-change packets */
1329 struct mbufq qrq; /* Query response packets */
1330 struct ifnet *ifp;
1331 struct mld_ifsoftc *mli;
1332 struct ifmultiaddr *ifma;
1333 struct in6_multi *inm, *tinm;
1334 int uri_fasthz;
1335
1336 uri_fasthz = 0;
1337
1338 /*
1339 * Quick check to see if any work needs to be done, in order to
1340 * minimize the overhead of fasttimo processing.
1341 * SMPng: XXX Unlocked reads.
1342 */
1343 if (!V_current_state_timers_running6 &&
1344 !V_interface_timers_running6 &&
1345 !V_state_change_timers_running6)
1346 return;
1347
1348 IN6_MULTI_LOCK();
1349 MLD_LOCK();
1350
1351 /*
1352 * MLDv2 General Query response timer processing.
1353 */
1354 if (V_interface_timers_running6) {
1355 CTR1(KTR_MLD, "%s: interface timers running", __func__);
1356
1357 V_interface_timers_running6 = 0;
1358 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1359 if (mli->mli_v2_timer == 0) {
1360 /* Do nothing. */
1361 } else if (--mli->mli_v2_timer == 0) {
1362 mld_v2_dispatch_general_query(mli);
1363 } else {
1364 V_interface_timers_running6 = 1;
1365 }
1366 }
1367 }
1368
1369 if (!V_current_state_timers_running6 &&
1370 !V_state_change_timers_running6)
1371 goto out_locked;
1372
1373 V_current_state_timers_running6 = 0;
1374 V_state_change_timers_running6 = 0;
1375
1376 CTR1(KTR_MLD, "%s: state change timers running", __func__);
1377
1378 /*
1379 * MLD host report and state-change timer processing.
1380 * Note: Processing a v2 group timer may remove a node.
1381 */
1382 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1383 ifp = mli->mli_ifp;
1384
1385 if (mli->mli_version == MLD_VERSION_2) {
1386 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1387 PR_FASTHZ);
1388 mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
1389 mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
1390 }
1391
1392 IF_ADDR_RLOCK(ifp);
1393 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1394 if (ifma->ifma_addr->sa_family != AF_INET6 ||
1395 ifma->ifma_protospec == NULL)
1396 continue;
1397 inm = (struct in6_multi *)ifma->ifma_protospec;
1398 switch (mli->mli_version) {
1399 case MLD_VERSION_1:
1400 mld_v1_process_group_timer(mli, inm);
1401 break;
1402 case MLD_VERSION_2:
1403 mld_v2_process_group_timers(mli, &qrq,
1404 &scq, inm, uri_fasthz);
1405 break;
1406 }
1407 }
1408 IF_ADDR_RUNLOCK(ifp);
1409
1410 switch (mli->mli_version) {
1411 case MLD_VERSION_1:
1412 /*
1413 * Transmit reports for this lifecycle. This
1414 * is done while not holding IF_ADDR_LOCK
1415 * since this can call
1416 * in6ifa_ifpforlinklocal() which locks
1417 * IF_ADDR_LOCK internally as well as
1418 * ip6_output() to transmit a packet.
1419 */
1420 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
1421 in6m_nrele, tinm) {
1422 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
1423 in6m_nrele);
1424 (void)mld_v1_transmit_report(inm,
1425 MLD_LISTENER_REPORT);
1426 }
1427 break;
1428 case MLD_VERSION_2:
1429 mld_dispatch_queue(&qrq, 0);
1430 mld_dispatch_queue(&scq, 0);
1431
1432 /*
1433 * Free the in_multi reference(s) for
1434 * this lifecycle.
1435 */
1436 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
1437 in6m_nrele, tinm) {
1438 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
1439 in6m_nrele);
1440 in6m_release_locked(inm);
1441 }
1442 break;
1443 }
1444 }
1445
1446 out_locked:
1447 MLD_UNLOCK();
1448 IN6_MULTI_UNLOCK();
1449 }
1450
1451 /*
1452 * Update host report group timer.
1453 * Will update the global pending timer flags.
1454 */
1455 static void
1456 mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
1457 {
1458 int report_timer_expired;
1459
1460 IN6_MULTI_LOCK_ASSERT();
1461 MLD_LOCK_ASSERT();
1462
1463 if (inm->in6m_timer == 0) {
1464 report_timer_expired = 0;
1465 } else if (--inm->in6m_timer == 0) {
1466 report_timer_expired = 1;
1467 } else {
1468 V_current_state_timers_running6 = 1;
1469 return;
1470 }
1471
1472 switch (inm->in6m_state) {
1473 case MLD_NOT_MEMBER:
1474 case MLD_SILENT_MEMBER:
1475 case MLD_IDLE_MEMBER:
1476 case MLD_LAZY_MEMBER:
1477 case MLD_SLEEPING_MEMBER:
1478 case MLD_AWAKENING_MEMBER:
1479 break;
1480 case MLD_REPORTING_MEMBER:
1481 if (report_timer_expired) {
1482 inm->in6m_state = MLD_IDLE_MEMBER;
1483 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
1484 in6m_nrele);
1485 }
1486 break;
1487 case MLD_G_QUERY_PENDING_MEMBER:
1488 case MLD_SG_QUERY_PENDING_MEMBER:
1489 case MLD_LEAVING_MEMBER:
1490 break;
1491 }
1492 }
1493
1494 /*
1495 * Update a group's timers for MLDv2.
1496 * Will update the global pending timer flags.
1497 * Note: Unlocked read from mli.
1498 */
1499 static void
1500 mld_v2_process_group_timers(struct mld_ifsoftc *mli,
1501 struct mbufq *qrq, struct mbufq *scq,
1502 struct in6_multi *inm, const int uri_fasthz)
1503 {
1504 int query_response_timer_expired;
1505 int state_change_retransmit_timer_expired;
1506 #ifdef KTR
1507 char ip6tbuf[INET6_ADDRSTRLEN];
1508 #endif
1509
1510 IN6_MULTI_LOCK_ASSERT();
1511 MLD_LOCK_ASSERT();
1512
1513 query_response_timer_expired = 0;
1514 state_change_retransmit_timer_expired = 0;
1515
1516 /*
1517 * During a transition from compatibility mode back to MLDv2,
1518 * a group record in REPORTING state may still have its group
1519 * timer active. This is a no-op in this function; it is easier
1520 * to deal with it here than to complicate the slow-timeout path.
1521 */
1522 if (inm->in6m_timer == 0) {
1523 query_response_timer_expired = 0;
1524 } else if (--inm->in6m_timer == 0) {
1525 query_response_timer_expired = 1;
1526 } else {
1527 V_current_state_timers_running6 = 1;
1528 }
1529
1530 if (inm->in6m_sctimer == 0) {
1531 state_change_retransmit_timer_expired = 0;
1532 } else if (--inm->in6m_sctimer == 0) {
1533 state_change_retransmit_timer_expired = 1;
1534 } else {
1535 V_state_change_timers_running6 = 1;
1536 }
1537
1538 /* We are in fasttimo, so be quick about it. */
1539 if (!state_change_retransmit_timer_expired &&
1540 !query_response_timer_expired)
1541 return;
1542
1543 switch (inm->in6m_state) {
1544 case MLD_NOT_MEMBER:
1545 case MLD_SILENT_MEMBER:
1546 case MLD_SLEEPING_MEMBER:
1547 case MLD_LAZY_MEMBER:
1548 case MLD_AWAKENING_MEMBER:
1549 case MLD_IDLE_MEMBER:
1550 break;
1551 case MLD_G_QUERY_PENDING_MEMBER:
1552 case MLD_SG_QUERY_PENDING_MEMBER:
1553 /*
1554 * Respond to a previously pending Group-Specific
1555 * or Group-and-Source-Specific query by enqueueing
1556 * the appropriate Current-State report for
1557 * immediate transmission.
1558 */
1559 if (query_response_timer_expired) {
1560 int retval;
1561
1562 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1563 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
1564 0);
1565 CTR2(KTR_MLD, "%s: enqueue record = %d",
1566 __func__, retval);
1567 inm->in6m_state = MLD_REPORTING_MEMBER;
1568 in6m_clear_recorded(inm);
1569 }
1570 /* FALLTHROUGH */
1571 case MLD_REPORTING_MEMBER:
1572 case MLD_LEAVING_MEMBER:
1573 if (state_change_retransmit_timer_expired) {
1574 /*
1575 * State-change retransmission timer fired.
1576 * If there are any further pending retransmissions,
1577 * set the global pending state-change flag, and
1578 * reset the timer.
1579 */
1580 if (--inm->in6m_scrv > 0) {
1581 inm->in6m_sctimer = uri_fasthz;
1582 V_state_change_timers_running6 = 1;
1583 }
1584 /*
1585 * Retransmit the previously computed state-change
1586 * report. If there are no further pending
1587 * retransmissions, the mbuf queue will be consumed.
1588 * Update T0 state to T1 as we have now sent
1589 * a state-change.
1590 */
1591 (void)mld_v2_merge_state_changes(inm, scq);
1592
1593 in6m_commit(inm);
1594 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
1595 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1596 if_name(inm->in6m_ifp));
1597
1598 /*
1599 * If we are leaving the group for good, make sure
1600 * we release MLD's reference to it.
1601 * This release must be deferred using a SLIST,
1602 * as we are called from a loop which traverses
1603 * the in_ifmultiaddr TAILQ.
1604 */
1605 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1606 inm->in6m_scrv == 0) {
1607 inm->in6m_state = MLD_NOT_MEMBER;
1608 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
1609 inm, in6m_nrele);
1610 }
1611 }
1612 break;
1613 }
1614 }
1615
1616 /*
1617 * Switch to a different version on the given interface,
1618 * as per Section 9.12.
1619 */
1620 static void
1621 mld_set_version(struct mld_ifsoftc *mli, const int version)
1622 {
1623 int old_version_timer;
1624
1625 MLD_LOCK_ASSERT();
1626
1627 CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
1628 version, mli->mli_ifp, if_name(mli->mli_ifp));
1629
1630 if (version == MLD_VERSION_1) {
1631 /*
1632 * Compute the "Older Version Querier Present" timer as per
1633 * Section 9.12.
1634 */
1635 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1636 old_version_timer *= PR_SLOWHZ;
1637 mli->mli_v1_timer = old_version_timer;
1638 }
1639
1640 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1641 mli->mli_version = MLD_VERSION_1;
1642 mld_v2_cancel_link_timers(mli);
1643 }
1644 }
1645
1646 /*
1647 * Cancel pending MLDv2 timers for the given link and all groups
1648 * joined on it; state-change, general-query, and group-query timers.
1649 */
1650 static void
1651 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
1652 {
1653 struct ifmultiaddr *ifma;
1654 struct ifnet *ifp;
1655 struct in6_multi *inm, *tinm;
1656
1657 CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
1658 mli->mli_ifp, if_name(mli->mli_ifp));
1659
1660 IN6_MULTI_LOCK_ASSERT();
1661 MLD_LOCK_ASSERT();
1662
1663 /*
1664 * Fast-track this potentially expensive operation
1665 * by checking all the global 'timer pending' flags.
1666 */
1667 if (!V_interface_timers_running6 &&
1668 !V_state_change_timers_running6 &&
1669 !V_current_state_timers_running6)
1670 return;
1671
1672 mli->mli_v2_timer = 0;
1673
1674 ifp = mli->mli_ifp;
1675
1676 IF_ADDR_RLOCK(ifp);
1677 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1678 if (ifma->ifma_addr->sa_family != AF_INET6)
1679 continue;
1680 inm = (struct in6_multi *)ifma->ifma_protospec;
1681 switch (inm->in6m_state) {
1682 case MLD_NOT_MEMBER:
1683 case MLD_SILENT_MEMBER:
1684 case MLD_IDLE_MEMBER:
1685 case MLD_LAZY_MEMBER:
1686 case MLD_SLEEPING_MEMBER:
1687 case MLD_AWAKENING_MEMBER:
1688 break;
1689 case MLD_LEAVING_MEMBER:
1690 /*
1691 * If we are leaving the group and switching
1692 * version, we need to release the final
1693 * reference held for issuing the INCLUDE {}.
1694 */
1695 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
1696 in6m_nrele);
1697 /* FALLTHROUGH */
1698 case MLD_G_QUERY_PENDING_MEMBER:
1699 case MLD_SG_QUERY_PENDING_MEMBER:
1700 in6m_clear_recorded(inm);
1701 /* FALLTHROUGH */
1702 case MLD_REPORTING_MEMBER:
1703 inm->in6m_sctimer = 0;
1704 inm->in6m_timer = 0;
1705 inm->in6m_state = MLD_REPORTING_MEMBER;
1706 /*
1707 * Free any pending MLDv2 state-change records.
1708 */
1709 mbufq_drain(&inm->in6m_scq);
1710 break;
1711 }
1712 }
1713 IF_ADDR_RUNLOCK(ifp);
1714 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
1715 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1716 in6m_release_locked(inm);
1717 }
1718 }
1719
1720 /*
1721 * Global slowtimo handler.
1722 * VIMAGE: Timeout handlers are expected to service all vimages.
1723 */
1724 void
1725 mld_slowtimo(void)
1726 {
1727 VNET_ITERATOR_DECL(vnet_iter);
1728
1729 VNET_LIST_RLOCK_NOSLEEP();
1730 VNET_FOREACH(vnet_iter) {
1731 CURVNET_SET(vnet_iter);
1732 mld_slowtimo_vnet();
1733 CURVNET_RESTORE();
1734 }
1735 VNET_LIST_RUNLOCK_NOSLEEP();
1736 }
1737
1738 /*
1739 * Per-vnet slowtimo handler.
1740 */
1741 static void
1742 mld_slowtimo_vnet(void)
1743 {
1744 struct mld_ifsoftc *mli;
1745
1746 MLD_LOCK();
1747
1748 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1749 mld_v1_process_querier_timers(mli);
1750 }
1751
1752 MLD_UNLOCK();
1753 }
1754
1755 /*
1756 * Update the Older Version Querier Present timers for a link.
1757 * See Section 9.12 of RFC 3810.
1758 */
1759 static void
1760 mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
1761 {
1762
1763 MLD_LOCK_ASSERT();
1764
1765 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1766 /*
1767 * MLDv1 Querier Present timer expired; revert to MLDv2.
1768 */
1769 CTR5(KTR_MLD,
1770 "%s: transition from v%d -> v%d on %p(%s)",
1771 __func__, mli->mli_version, MLD_VERSION_2,
1772 mli->mli_ifp, if_name(mli->mli_ifp));
1773 mli->mli_version = MLD_VERSION_2;
1774 }
1775 }
1776
1777 /*
1778 * Transmit an MLDv1 report immediately.
1779 */
1780 static int
1781 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1782 {
1783 struct ifnet *ifp;
1784 struct in6_ifaddr *ia;
1785 struct ip6_hdr *ip6;
1786 struct mbuf *mh, *md;
1787 struct mld_hdr *mld;
1788
1789 IN6_MULTI_LOCK_ASSERT();
1790 MLD_LOCK_ASSERT();
1791
1792 ifp = in6m->in6m_ifp;
1793 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1794 /* ia may be NULL if link-local address is tentative. */
1795
1796 mh = m_gethdr(M_NOWAIT, MT_DATA);
1797 if (mh == NULL) {
1798 if (ia != NULL)
1799 ifa_free(&ia->ia_ifa);
1800 return (ENOMEM);
1801 }
1802 md = m_get(M_NOWAIT, MT_DATA);
1803 if (md == NULL) {
1804 m_free(mh);
1805 if (ia != NULL)
1806 ifa_free(&ia->ia_ifa);
1807 return (ENOMEM);
1808 }
1809 mh->m_next = md;
1810
1811 /*
1812 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1813 * that ether_output() does not need to allocate another mbuf
1814 * for the header in the most common case.
1815 */
1816 M_ALIGN(mh, sizeof(struct ip6_hdr));
1817 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1818 mh->m_len = sizeof(struct ip6_hdr);
1819
1820 ip6 = mtod(mh, struct ip6_hdr *);
1821 ip6->ip6_flow = 0;
1822 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1823 ip6->ip6_vfc |= IPV6_VERSION;
1824 ip6->ip6_nxt = IPPROTO_ICMPV6;
1825 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1826 ip6->ip6_dst = in6m->in6m_addr;
1827
1828 md->m_len = sizeof(struct mld_hdr);
1829 mld = mtod(md, struct mld_hdr *);
1830 mld->mld_type = type;
1831 mld->mld_code = 0;
1832 mld->mld_cksum = 0;
1833 mld->mld_maxdelay = 0;
1834 mld->mld_reserved = 0;
1835 mld->mld_addr = in6m->in6m_addr;
1836 in6_clearscope(&mld->mld_addr);
1837 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1838 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1839
1840 mld_save_context(mh, ifp);
1841 mh->m_flags |= M_MLDV1;
1842
1843 mld_dispatch_packet(mh);
1844
1845 if (ia != NULL)
1846 ifa_free(&ia->ia_ifa);
1847 return (0);
1848 }
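/*
 * Wire layout of the report built above (sketch; ip6_output() may
 * still prepend extension headers such as the Router Alert option):
 *
 *	mh: struct ip6_hdr (40 bytes)	src = link-local or ::
 *					dst = group address
 *	md: struct mld_hdr (24 bytes)	type, code, cksum, maxdelay,
 *					reserved, group address
 *
 * The ICMPv6 checksum covers a pseudo-header plus the 24-byte MLD
 * header, matching the in6_cksum() call above.
 */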
1849
1850 /*
1851 * Process a state change from the upper layer for the given IPv6 group.
1852 *
1853 * Each socket holds a reference on the in_multi in its own ip_moptions.
1854  * The socket layer will have made the necessary updates to the group
1855  * state; it is now up to MLD to issue a state change report if there
1856 * has been any change between T0 (when the last state-change was issued)
1857 * and T1 (now).
1858 *
1859  * We use the MLDv2 state machine at group level. The MLD module
1860 * however makes the decision as to which MLD protocol version to speak.
1861 * A state change *from* INCLUDE {} always means an initial join.
1862 * A state change *to* INCLUDE {} always means a final leave.
1863 *
1864 * If delay is non-zero, and the state change is an initial multicast
1865 * join, the state change report will be delayed by 'delay' ticks
1866 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
1867 * the initial MLDv2 state change report will be delayed by whichever
1868 * is sooner, a pending state-change timer or delay itself.
1869 *
1870 * VIMAGE: curvnet should have been set by caller, as this routine
1871 * is called from the socket option handlers.
1872 */
1873 int
1874 mld_change_state(struct in6_multi *inm, const int delay)
1875 {
1876 struct mld_ifsoftc *mli;
1877 struct ifnet *ifp;
1878 int error;
1879
1880 IN6_MULTI_LOCK_ASSERT();
1881
1882 error = 0;
1883
1884 /*
1885 * Try to detect if the upper layer just asked us to change state
1886 * for an interface which has now gone away.
1887 */
1888 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
1889 ifp = inm->in6m_ifma->ifma_ifp;
1890 if (ifp != NULL) {
1891 /*
1892 * Sanity check that netinet6's notion of ifp is the
1893 * same as net's.
1894 */
1895 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
1896 }
1897
1898 MLD_LOCK();
1899
1900 mli = MLD_IFINFO(ifp);
1901 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1902
1903 /*
1904 * If we detect a state transition to or from MCAST_UNDEFINED
1905 * for this group, then we are starting or finishing an MLD
1906 * life cycle for this group.
1907 */
1908 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
1909 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
1910 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
1911 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
1912 CTR1(KTR_MLD, "%s: initial join", __func__);
1913 error = mld_initial_join(inm, mli, delay);
1914 goto out_locked;
1915 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
1916 CTR1(KTR_MLD, "%s: final leave", __func__);
1917 mld_final_leave(inm, mli);
1918 goto out_locked;
1919 }
1920 } else {
1921 CTR1(KTR_MLD, "%s: filter set change", __func__);
1922 }
1923
1924 error = mld_handle_state_change(inm, mli);
1925
1926 out_locked:
1927 MLD_UNLOCK();
1928 return (error);
1929 }
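/*
 * Illustrative caller sketch (not part of this file; the exact caller
 * shape is an assumption): a socket-option handler is assumed to update
 * the group's t1 filter state first, then hand off to MLD under the
 * IN6_MULTI lock. A zero 'delay' requests an immediate report.
 */
#if 0
	IN6_MULTI_LOCK();
	/* ... in6m_merge() has already updated inm->in6m_st[1] ... */
	error = mld_change_state(inm, 0);
	IN6_MULTI_UNLOCK();
	if (error != 0)
		printf("%s: mld_change_state failed: %d\n", __func__, error);
#endif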
1930
1931 /*
1932 * Perform the initial join for an MLD group.
1933 *
1934 * When joining a group:
1935 * If the group should have its MLD traffic suppressed, do nothing.
1936  * MLDv1 starts sending MLDv1 listener reports.
1937 * MLDv2 will schedule an MLDv2 state-change report containing the
1938 * initial state of the membership.
1939 *
1940 * If the delay argument is non-zero, then we must delay sending the
1941 * initial state change for delay ticks (in units of PR_FASTHZ).
1942 */
1943 static int
1944 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
1945 const int delay)
1946 {
1947 struct ifnet *ifp;
1948 struct mbufq *mq;
1949 int error, retval, syncstates;
1950 int odelay;
1951 #ifdef KTR
1952 char ip6tbuf[INET6_ADDRSTRLEN];
1953 #endif
1954
1955 CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
1956 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1957 inm->in6m_ifp, if_name(inm->in6m_ifp));
1958
1959 error = 0;
1960 syncstates = 1;
1961
1962 ifp = inm->in6m_ifp;
1963
1964 IN6_MULTI_LOCK_ASSERT();
1965 MLD_LOCK_ASSERT();
1966
1967 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
1968
1969 /*
1970  * Groups joined on loopback or marked as 'not reported'
1971 * enter the MLD_SILENT_MEMBER state and
1972 * are never reported in any protocol exchanges.
1973 * All other groups enter the appropriate state machine
1974 * for the version in use on this link.
1975 * A link marked as MLIF_SILENT causes MLD to be completely
1976 * disabled for the link.
1977 */
1978 if ((ifp->if_flags & IFF_LOOPBACK) ||
1979 (mli->mli_flags & MLIF_SILENT) ||
1980 !mld_is_addr_reported(&inm->in6m_addr)) {
1981 CTR1(KTR_MLD,
1982 "%s: not kicking state machine for silent group", __func__);
1983 inm->in6m_state = MLD_SILENT_MEMBER;
1984 inm->in6m_timer = 0;
1985 } else {
1986 /*
1987 * Deal with overlapping in_multi lifecycle.
1988 * If this group was LEAVING, then make sure
1989 * we drop the reference we picked up to keep the
1990 * group around for the final INCLUDE {} enqueue.
1991 */
1992 if (mli->mli_version == MLD_VERSION_2 &&
1993 inm->in6m_state == MLD_LEAVING_MEMBER)
1994 in6m_release_locked(inm);
1995
1996 inm->in6m_state = MLD_REPORTING_MEMBER;
1997
1998 switch (mli->mli_version) {
1999 case MLD_VERSION_1:
2000 /*
2001 * If a delay was provided, only use it if
2002 * it is greater than the delay normally
2003 * used for an MLDv1 state change report,
2004 * and delay sending the initial MLDv1 report
2005 * by not transitioning to the IDLE state.
2006 */
2007 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
2008 if (delay) {
2009 inm->in6m_timer = max(delay, odelay);
2010 V_current_state_timers_running6 = 1;
2011 } else {
2012 inm->in6m_state = MLD_IDLE_MEMBER;
2013 error = mld_v1_transmit_report(inm,
2014 MLD_LISTENER_REPORT);
2015 if (error == 0) {
2016 inm->in6m_timer = odelay;
2017 V_current_state_timers_running6 = 1;
2018 }
2019 }
2020 break;
2021
2022 case MLD_VERSION_2:
2023 /*
2024 * Defer update of T0 to T1, until the first copy
2025 * of the state change has been transmitted.
2026 */
2027 syncstates = 0;
2028
2029 /*
2030 * Immediately enqueue a State-Change Report for
2031 * this interface, freeing any previous reports.
2032 * Don't kick the timers if there is nothing to do,
2033 * or if an error occurred.
2034 */
2035 mq = &inm->in6m_scq;
2036 mbufq_drain(mq);
2037 retval = mld_v2_enqueue_group_record(mq, inm, 1,
2038 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2039 CTR2(KTR_MLD, "%s: enqueue record = %d",
2040 __func__, retval);
2041 if (retval <= 0) {
2042 error = retval * -1;
2043 break;
2044 }
2045
2046 /*
2047 * Schedule transmission of pending state-change
2048 * report up to RV times for this link. The timer
2049 * will fire at the next mld_fasttimo (~200ms),
2050 * giving us an opportunity to merge the reports.
2051 *
2052 			 * If a delay was provided to this function, only
2053 			 * use it if it is sooner than the existing timer.
2054 */
2055 KASSERT(mli->mli_rv > 1,
2056 ("%s: invalid robustness %d", __func__,
2057 mli->mli_rv));
2058 inm->in6m_scrv = mli->mli_rv;
2059 if (delay) {
2060 if (inm->in6m_sctimer > 1) {
2061 inm->in6m_sctimer =
2062 min(inm->in6m_sctimer, delay);
2063 } else
2064 inm->in6m_sctimer = delay;
2065 } else
2066 inm->in6m_sctimer = 1;
2067 V_state_change_timers_running6 = 1;
2068
2069 error = 0;
2070 break;
2071 }
2072 }
2073
2074 /*
2075 * Only update the T0 state if state change is atomic,
2076 * i.e. we don't need to wait for a timer to fire before we
2077 * can consider the state change to have been communicated.
2078 */
2079 if (syncstates) {
2080 in6m_commit(inm);
2081 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2082 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2083 if_name(inm->in6m_ifp));
2084 }
2085
2086 return (error);
2087 }
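/*
 * A worked example for the MLDv1 branch above, assuming
 * MLD_V1_MAX_RI == 10 and PR_FASTHZ == 5 as used elsewhere in this
 * tree: MLD_RANDOM_DELAY(10 * 5) picks a uniform value in [1, 50]
 * fast-timeout ticks (~200ms each), i.e. the unsolicited repeat is
 * scheduled within the 10 second MLDv1 maximum response interval.
 * A caller-supplied 'delay' wins only if it exceeds that pick.
 */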
2088
2089 /*
2090 * Issue an intermediate state change during the life-cycle.
2091 */
2092 static int
2093 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
2094 {
2095 struct ifnet *ifp;
2096 int retval;
2097 #ifdef KTR
2098 char ip6tbuf[INET6_ADDRSTRLEN];
2099 #endif
2100
2101 CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
2102 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2103 inm->in6m_ifp, if_name(inm->in6m_ifp));
2104
2105 ifp = inm->in6m_ifp;
2106
2107 IN6_MULTI_LOCK_ASSERT();
2108 MLD_LOCK_ASSERT();
2109
2110 KASSERT(mli && mli->mli_ifp == ifp,
2111 ("%s: inconsistent ifp", __func__));
2112
2113 if ((ifp->if_flags & IFF_LOOPBACK) ||
2114 (mli->mli_flags & MLIF_SILENT) ||
2115 !mld_is_addr_reported(&inm->in6m_addr) ||
2116 (mli->mli_version != MLD_VERSION_2)) {
2117 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2118 CTR1(KTR_MLD,
2119 "%s: not kicking state machine for silent group", __func__);
2120 }
2121 CTR1(KTR_MLD, "%s: nothing to do", __func__);
2122 in6m_commit(inm);
2123 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2124 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2125 if_name(inm->in6m_ifp));
2126 return (0);
2127 }
2128
2129 mbufq_drain(&inm->in6m_scq);
2130
2131 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2132 (mli->mli_flags & MLIF_USEALLOW));
2133 CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
2134 if (retval <= 0)
2135 return (-retval);
2136
2137 /*
2138 * If record(s) were enqueued, start the state-change
2139 * report timer for this group.
2140 */
2141 inm->in6m_scrv = mli->mli_rv;
2142 inm->in6m_sctimer = 1;
2143 V_state_change_timers_running6 = 1;
2144
2145 return (0);
2146 }
2147
2148 /*
2149 * Perform the final leave for a multicast address.
2150 *
2151 * When leaving a group:
2152 * MLDv1 sends a DONE message, if and only if we are the reporter.
2153 * MLDv2 enqueues a state-change report containing a transition
2154 * to INCLUDE {} for immediate transmission.
2155 */
2156 static void
2157 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
2158 {
2159 int syncstates;
2160 #ifdef KTR
2161 char ip6tbuf[INET6_ADDRSTRLEN];
2162 #endif
2163
2164 syncstates = 1;
2165
2166 CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
2167 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2168 inm->in6m_ifp, if_name(inm->in6m_ifp));
2169
2170 IN6_MULTI_LOCK_ASSERT();
2171 MLD_LOCK_ASSERT();
2172
2173 switch (inm->in6m_state) {
2174 case MLD_NOT_MEMBER:
2175 case MLD_SILENT_MEMBER:
2176 case MLD_LEAVING_MEMBER:
2177 /* Already leaving or left; do nothing. */
2178 CTR1(KTR_MLD,
2179 "%s: not kicking state machine for silent group", __func__);
2180 break;
2181 case MLD_REPORTING_MEMBER:
2182 case MLD_IDLE_MEMBER:
2183 case MLD_G_QUERY_PENDING_MEMBER:
2184 case MLD_SG_QUERY_PENDING_MEMBER:
2185 if (mli->mli_version == MLD_VERSION_1) {
2186 #ifdef INVARIANTS
2187 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2188 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
2189 panic("%s: MLDv2 state reached, not MLDv2 mode",
2190 __func__);
2191 #endif
2192 mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2193 inm->in6m_state = MLD_NOT_MEMBER;
2194 V_current_state_timers_running6 = 1;
2195 } else if (mli->mli_version == MLD_VERSION_2) {
2196 /*
2197 * Stop group timer and all pending reports.
2198 * Immediately enqueue a state-change report
2199 * TO_IN {} to be sent on the next fast timeout,
2200 * giving us an opportunity to merge reports.
2201 */
2202 mbufq_drain(&inm->in6m_scq);
2203 inm->in6m_timer = 0;
2204 inm->in6m_scrv = mli->mli_rv;
2205 CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
2206 "pending retransmissions.", __func__,
2207 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2208 if_name(inm->in6m_ifp), inm->in6m_scrv);
2209 if (inm->in6m_scrv == 0) {
2210 inm->in6m_state = MLD_NOT_MEMBER;
2211 inm->in6m_sctimer = 0;
2212 } else {
2213 int retval;
2214
2215 in6m_acquire_locked(inm);
2216
2217 retval = mld_v2_enqueue_group_record(
2218 &inm->in6m_scq, inm, 1, 0, 0,
2219 (mli->mli_flags & MLIF_USEALLOW));
2220 KASSERT(retval != 0,
2221 ("%s: enqueue record = %d", __func__,
2222 retval));
2223
2224 inm->in6m_state = MLD_LEAVING_MEMBER;
2225 inm->in6m_sctimer = 1;
2226 V_state_change_timers_running6 = 1;
2227 syncstates = 0;
2228 }
2229 break;
2230 }
2231 break;
2232 case MLD_LAZY_MEMBER:
2233 case MLD_SLEEPING_MEMBER:
2234 case MLD_AWAKENING_MEMBER:
2235 /* Our reports are suppressed; do nothing. */
2236 break;
2237 }
2238
2239 if (syncstates) {
2240 in6m_commit(inm);
2241 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2242 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2243 if_name(inm->in6m_ifp));
2244 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2245 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
2246 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
2247 }
2248 }
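/*
 * Illustration of the MLDv2 leave above: with the default robustness
 * variable of 2 (the RFC 3810 'RV'), in6m_scrv starts at 2, so the
 * TO_IN {} record enqueued here is sent and then retransmitted once by
 * the state-change timer; the reference taken via in6m_acquire_locked()
 * is dropped once the count reaches zero and the leave completes.
 */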
2249
2250 /*
2251 * Enqueue an MLDv2 group record to the given output queue.
2252 *
2253 * If is_state_change is zero, a current-state record is appended.
2254 * If is_state_change is non-zero, a state-change report is appended.
2255 *
2256 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2257 * If is_group_query is zero, and if there is a packet with free space
2258 * at the tail of the queue, it will be appended to providing there
2259 * is enough free space.
2260 * Otherwise a new mbuf packet chain is allocated.
2261 *
2262 * If is_source_query is non-zero, each source is checked to see if
2263 * it was recorded for a Group-Source query, and will be omitted if
2264 * it is not both in-mode and recorded.
2265 *
2266 * If use_block_allow is non-zero, state change reports for initial join
2267 * and final leave, on an inclusive mode group with a source list, will be
2268 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2269 *
2270 * The function will attempt to allocate leading space in the packet
2271 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2272 *
2273 * If successful the size of all data appended to the queue is returned,
2274 * otherwise an error code less than zero is returned, or zero if
2275 * no record(s) were appended.
2276 */
2277 static int
2278 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
2279 const int is_state_change, const int is_group_query,
2280 const int is_source_query, const int use_block_allow)
2281 {
2282 struct mldv2_record mr;
2283 struct mldv2_record *pmr;
2284 struct ifnet *ifp;
2285 struct ip6_msource *ims, *nims;
2286 struct mbuf *m0, *m, *md;
2287 int error, is_filter_list_change;
2288 int minrec0len, m0srcs, msrcs, nbytes, off;
2289 int record_has_sources;
2290 int now;
2291 int type;
2292 uint8_t mode;
2293 #ifdef KTR
2294 char ip6tbuf[INET6_ADDRSTRLEN];
2295 #endif
2296
2297 IN6_MULTI_LOCK_ASSERT();
2298
2299 error = 0;
2300 ifp = inm->in6m_ifp;
2301 is_filter_list_change = 0;
2302 m = NULL;
2303 m0 = NULL;
2304 m0srcs = 0;
2305 msrcs = 0;
2306 nbytes = 0;
2307 nims = NULL;
2308 record_has_sources = 1;
2309 pmr = NULL;
2310 type = MLD_DO_NOTHING;
2311 mode = inm->in6m_st[1].iss_fmode;
2312
2313 /*
2314 * If we did not transition out of ASM mode during t0->t1,
2315 * and there are no source nodes to process, we can skip
2316 * the generation of source records.
2317 */
2318 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2319 inm->in6m_nsrc == 0)
2320 record_has_sources = 0;
2321
2322 if (is_state_change) {
2323 /*
2324 * Queue a state change record.
2325 * If the mode did not change, and there are non-ASM
2326 * listeners or source filters present,
2327 * we potentially need to issue two records for the group.
2328 * If there are ASM listeners, and there was no filter
2329 * mode transition of any kind, do nothing.
2330 *
2331 * If we are transitioning to MCAST_UNDEFINED, we need
2332 * not send any sources. A transition to/from this state is
2333 * considered inclusive with some special treatment.
2334 *
2335 * If we are rewriting initial joins/leaves to use
2336 * ALLOW/BLOCK, and the group's membership is inclusive,
2337 * we need to send sources in all cases.
2338 */
2339 if (mode != inm->in6m_st[0].iss_fmode) {
2340 if (mode == MCAST_EXCLUDE) {
2341 CTR1(KTR_MLD, "%s: change to EXCLUDE",
2342 __func__);
2343 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2344 } else {
2345 CTR1(KTR_MLD, "%s: change to INCLUDE",
2346 __func__);
2347 if (use_block_allow) {
2348 /*
2349 * XXX
2350 * Here we're interested in state
2351 * edges either direction between
2352 * MCAST_UNDEFINED and MCAST_INCLUDE.
2353 * Perhaps we should just check
2354 * the group state, rather than
2355 * the filter mode.
2356 */
2357 if (mode == MCAST_UNDEFINED) {
2358 type = MLD_BLOCK_OLD_SOURCES;
2359 } else {
2360 type = MLD_ALLOW_NEW_SOURCES;
2361 }
2362 } else {
2363 type = MLD_CHANGE_TO_INCLUDE_MODE;
2364 if (mode == MCAST_UNDEFINED)
2365 record_has_sources = 0;
2366 }
2367 }
2368 } else {
2369 if (record_has_sources) {
2370 is_filter_list_change = 1;
2371 } else {
2372 type = MLD_DO_NOTHING;
2373 }
2374 }
2375 } else {
2376 /*
2377 * Queue a current state record.
2378 */
2379 if (mode == MCAST_EXCLUDE) {
2380 type = MLD_MODE_IS_EXCLUDE;
2381 } else if (mode == MCAST_INCLUDE) {
2382 type = MLD_MODE_IS_INCLUDE;
2383 KASSERT(inm->in6m_st[1].iss_asm == 0,
2384 ("%s: inm %p is INCLUDE but ASM count is %d",
2385 __func__, inm, inm->in6m_st[1].iss_asm));
2386 }
2387 }
2388
2389 /*
2390 * Generate the filter list changes using a separate function.
2391 */
2392 if (is_filter_list_change)
2393 return (mld_v2_enqueue_filter_change(mq, inm));
2394
2395 if (type == MLD_DO_NOTHING) {
2396 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
2397 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2398 if_name(inm->in6m_ifp));
2399 return (0);
2400 }
2401
2402 /*
2403 * If any sources are present, we must be able to fit at least
2404 * one in the trailing space of the tail packet's mbuf,
2405 * ideally more.
2406 */
2407 minrec0len = sizeof(struct mldv2_record);
2408 if (record_has_sources)
2409 minrec0len += sizeof(struct in6_addr);
2410
2411 CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
2412 mld_rec_type_to_str(type),
2413 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2414 if_name(inm->in6m_ifp));
2415
2416 /*
2417  * Check if we have a packet at the tail of the queue into which
2418  * the first group record for this group will fit.
2419 * Otherwise allocate a new packet.
2420 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2421 * Note: Group records for G/GSR query responses MUST be sent
2422 * in their own packet.
2423 */
2424 m0 = mbufq_last(mq);
2425 if (!is_group_query &&
2426 m0 != NULL &&
2427 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2428 (m0->m_pkthdr.len + minrec0len) <
2429 (ifp->if_mtu - MLD_MTUSPACE)) {
2430 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2431 sizeof(struct mldv2_record)) /
2432 sizeof(struct in6_addr);
2433 m = m0;
2434 CTR1(KTR_MLD, "%s: use existing packet", __func__);
2435 } else {
2436 if (mbufq_full(mq)) {
2437 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2438 return (-ENOMEM);
2439 }
2440 m = NULL;
2441 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2442 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2443 if (!is_state_change && !is_group_query)
2444 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2445 if (m == NULL)
2446 m = m_gethdr(M_NOWAIT, MT_DATA);
2447 if (m == NULL)
2448 return (-ENOMEM);
2449
2450 mld_save_context(m, ifp);
2451
2452 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
2453 }
2454
2455 /*
2456 * Append group record.
2457 * If we have sources, we don't know how many yet.
2458 */
2459 mr.mr_type = type;
2460 mr.mr_datalen = 0;
2461 mr.mr_numsrc = 0;
2462 mr.mr_addr = inm->in6m_addr;
2463 in6_clearscope(&mr.mr_addr);
2464 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2465 if (m != m0)
2466 m_freem(m);
2467 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2468 return (-ENOMEM);
2469 }
2470 nbytes += sizeof(struct mldv2_record);
2471
2472 /*
2473 * Append as many sources as will fit in the first packet.
2474 * If we are appending to a new packet, the chain allocation
2475 * may potentially use clusters; use m_getptr() in this case.
2476 * If we are appending to an existing packet, we need to obtain
2477 * a pointer to the group record after m_append(), in case a new
2478 * mbuf was allocated.
2479 *
2480 * Only append sources which are in-mode at t1. If we are
2481 * transitioning to MCAST_UNDEFINED state on the group, and
2482 * use_block_allow is zero, do not include source entries.
2483 * Otherwise, we need to include this source in the report.
2484 *
2485 * Only report recorded sources in our filter set when responding
2486 * to a group-source query.
2487 */
2488 if (record_has_sources) {
2489 if (m == m0) {
2490 md = m_last(m);
2491 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2492 md->m_len - nbytes);
2493 } else {
2494 md = m_getptr(m, 0, &off);
2495 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2496 off);
2497 }
2498 msrcs = 0;
2499 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2500 nims) {
2501 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2502 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2503 now = im6s_get_mode(inm, ims, 1);
2504 CTR2(KTR_MLD, "%s: node is %d", __func__, now);
2505 if ((now != mode) ||
2506 (now == mode &&
2507 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2508 CTR1(KTR_MLD, "%s: skip node", __func__);
2509 continue;
2510 }
2511 if (is_source_query && ims->im6s_stp == 0) {
2512 CTR1(KTR_MLD, "%s: skip unrecorded node",
2513 __func__);
2514 continue;
2515 }
2516 CTR1(KTR_MLD, "%s: append node", __func__);
2517 if (!m_append(m, sizeof(struct in6_addr),
2518 (void *)&ims->im6s_addr)) {
2519 if (m != m0)
2520 m_freem(m);
2521 CTR1(KTR_MLD, "%s: m_append() failed.",
2522 __func__);
2523 return (-ENOMEM);
2524 }
2525 nbytes += sizeof(struct in6_addr);
2526 ++msrcs;
2527 if (msrcs == m0srcs)
2528 break;
2529 }
2530 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
2531 msrcs);
2532 pmr->mr_numsrc = htons(msrcs);
2533 nbytes += (msrcs * sizeof(struct in6_addr));
2534 }
2535
2536 if (is_source_query && msrcs == 0) {
2537 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
2538 if (m != m0)
2539 m_freem(m);
2540 return (0);
2541 }
2542
2543 /*
2544 * We are good to go with first packet.
2545 */
2546 if (m != m0) {
2547 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
2548 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2549 mbufq_enqueue(mq, m);
2550 } else
2551 m->m_pkthdr.PH_vt.vt_nrecs++;
2552
2553 /*
2554 * No further work needed if no source list in packet(s).
2555 */
2556 if (!record_has_sources)
2557 return (nbytes);
2558
2559 /*
2560 * Whilst sources remain to be announced, we need to allocate
2561 * a new packet and fill out as many sources as will fit.
2562 * Always try for a cluster first.
2563 */
2564 while (nims != NULL) {
2565 if (mbufq_full(mq)) {
2566 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2567 return (-ENOMEM);
2568 }
2569 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2570 if (m == NULL)
2571 m = m_gethdr(M_NOWAIT, MT_DATA);
2572 if (m == NULL)
2573 return (-ENOMEM);
2574 mld_save_context(m, ifp);
2575 md = m_getptr(m, 0, &off);
2576 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2577 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
2578
2579 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2580 if (m != m0)
2581 m_freem(m);
2582 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2583 return (-ENOMEM);
2584 }
2585 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2586 nbytes += sizeof(struct mldv2_record);
2587
2588 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2589 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2590
2591 msrcs = 0;
2592 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2593 CTR2(KTR_MLD, "%s: visit node %s",
2594 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2595 now = im6s_get_mode(inm, ims, 1);
2596 if ((now != mode) ||
2597 (now == mode &&
2598 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2599 CTR1(KTR_MLD, "%s: skip node", __func__);
2600 continue;
2601 }
2602 if (is_source_query && ims->im6s_stp == 0) {
2603 CTR1(KTR_MLD, "%s: skip unrecorded node",
2604 __func__);
2605 continue;
2606 }
2607 CTR1(KTR_MLD, "%s: append node", __func__);
2608 if (!m_append(m, sizeof(struct in6_addr),
2609 (void *)&ims->im6s_addr)) {
2610 if (m != m0)
2611 m_freem(m);
2612 CTR1(KTR_MLD, "%s: m_append() failed.",
2613 __func__);
2614 return (-ENOMEM);
2615 }
2616 ++msrcs;
2617 if (msrcs == m0srcs)
2618 break;
2619 }
2620 pmr->mr_numsrc = htons(msrcs);
2621 nbytes += (msrcs * sizeof(struct in6_addr));
2622
2623 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
2624 mbufq_enqueue(mq, m);
2625 }
2626
2627 return (nbytes);
2628 }
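/*
 * Sizing example for the m0srcs computations above (illustrative;
 * assumes a 1500 byte MTU, a 40 byte IPv6 header, an 8 byte
 * hop-by-hop router-alert option, an 8 byte ICMPv6 report header,
 * a 20 byte group record and 16 byte source addresses):
 *
 *	m0srcs = (1500 - 56 - 20) / 16 = 89
 *
 * i.e. a fresh packet fits 89 sources in its first group record;
 * longer source lists spill into the follow-up packets built in
 * the loop above.
 */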
2629
2630 /*
2631 * Type used to mark record pass completion.
2632 * We exploit the fact we can cast to this easily from the
2633 * current filter modes on each ip_msource node.
2634 */
2635 typedef enum {
2636 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2637 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2638 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2639 REC_FULL = REC_ALLOW | REC_BLOCK
2640 } rectype_t;
2641
2642 /*
2643 * Enqueue an MLDv2 filter list change to the given output queue.
2644 *
2645 * Source list filter state is held in an RB-tree. When the filter list
2646 * for a group is changed without changing its mode, we need to compute
2647 * the deltas between T0 and T1 for each source in the filter set,
2648 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2649 *
2650  * As we may potentially queue two record types, and the entire RB-tree
2651 * needs to be walked at once, we break this out into its own function
2652 * so we can generate a tightly packed queue of packets.
2653 *
2654 * XXX This could be written to only use one tree walk, although that makes
2655 * serializing into the mbuf chains a bit harder. For now we do two walks
2656 * which makes things easier on us, and it may or may not be harder on
2657 * the L2 cache.
2658 *
2659 * If successful the size of all data appended to the queue is returned,
2660 * otherwise an error code less than zero is returned, or zero if
2661 * no record(s) were appended.
2662 */
2663 static int
2664 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
2665 {
2666 static const int MINRECLEN =
2667 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2668 struct ifnet *ifp;
2669 struct mldv2_record mr;
2670 struct mldv2_record *pmr;
2671 struct ip6_msource *ims, *nims;
2672 struct mbuf *m, *m0, *md;
2673 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2674 int nallow, nblock;
2675 uint8_t mode, now, then;
2676 rectype_t crt, drt, nrt;
2677 #ifdef KTR
2678 char ip6tbuf[INET6_ADDRSTRLEN];
2679 #endif
2680
2681 IN6_MULTI_LOCK_ASSERT();
2682
2683 if (inm->in6m_nsrc == 0 ||
2684 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2685 return (0);
2686
2687 ifp = inm->in6m_ifp; /* interface */
2688 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
2689 crt = REC_NONE; /* current group record type */
2690 drt = REC_NONE; /* mask of completed group record types */
2691 nrt = REC_NONE; /* record type for current node */
2692 	m0srcs = 0;	/* # sources that will fit in current mbuf chain */
2693 npbytes = 0; /* # of bytes appended this packet */
2694 nbytes = 0; /* # of bytes appended to group's state-change queue */
2695 rsrcs = 0; /* # sources encoded in current record */
2696 schanged = 0; /* # nodes encoded in overall filter change */
2697 nallow = 0; /* # of source entries in ALLOW_NEW */
2698 nblock = 0; /* # of source entries in BLOCK_OLD */
2699 nims = NULL; /* next tree node pointer */
2700
2701 /*
2702 * For each possible filter record mode.
2703 * The first kind of source we encounter tells us which
2704 * is the first kind of record we start appending.
2705 * If a node transitioned to UNDEFINED at t1, its mode is treated
2706 * as the inverse of the group's filter mode.
2707 */
2708 while (drt != REC_FULL) {
2709 do {
2710 m0 = mbufq_last(mq);
2711 if (m0 != NULL &&
2712 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2713 MLD_V2_REPORT_MAXRECS) &&
2714 (m0->m_pkthdr.len + MINRECLEN) <
2715 (ifp->if_mtu - MLD_MTUSPACE)) {
2716 m = m0;
2717 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2718 sizeof(struct mldv2_record)) /
2719 sizeof(struct in6_addr);
2720 CTR1(KTR_MLD,
2721 "%s: use previous packet", __func__);
2722 } else {
2723 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2724 if (m == NULL)
2725 m = m_gethdr(M_NOWAIT, MT_DATA);
2726 if (m == NULL) {
2727 CTR1(KTR_MLD,
2728 "%s: m_get*() failed", __func__);
2729 return (-ENOMEM);
2730 }
2731 m->m_pkthdr.PH_vt.vt_nrecs = 0;
2732 mld_save_context(m, ifp);
2733 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2734 sizeof(struct mldv2_record)) /
2735 sizeof(struct in6_addr);
2736 npbytes = 0;
2737 CTR1(KTR_MLD,
2738 "%s: allocated new packet", __func__);
2739 }
2740 /*
2741 * Append the MLD group record header to the
2742 * current packet's data area.
2743 * Recalculate pointer to free space for next
2744 * group record, in case m_append() allocated
2745 * a new mbuf or cluster.
2746 */
2747 memset(&mr, 0, sizeof(mr));
2748 mr.mr_addr = inm->in6m_addr;
2749 in6_clearscope(&mr.mr_addr);
2750 if (!m_append(m, sizeof(mr), (void *)&mr)) {
2751 if (m != m0)
2752 m_freem(m);
2753 CTR1(KTR_MLD,
2754 "%s: m_append() failed", __func__);
2755 return (-ENOMEM);
2756 }
2757 npbytes += sizeof(struct mldv2_record);
2758 if (m != m0) {
2759 /* new packet; offset in chain */
2760 md = m_getptr(m, npbytes -
2761 sizeof(struct mldv2_record), &off);
2762 pmr = (struct mldv2_record *)(mtod(md,
2763 uint8_t *) + off);
2764 } else {
2765 /* current packet; offset from last append */
2766 md = m_last(m);
2767 pmr = (struct mldv2_record *)(mtod(md,
2768 uint8_t *) + md->m_len -
2769 sizeof(struct mldv2_record));
2770 }
2771 /*
2772 * Begin walking the tree for this record type
2773 * pass, or continue from where we left off
2774 * previously if we had to allocate a new packet.
2775 * Only report deltas in-mode at t1.
2776 * We need not report included sources as allowed
2777 * if we are in inclusive mode on the group,
2778 * however the converse is not true.
2779 */
2780 rsrcs = 0;
2781 if (nims == NULL) {
2782 nims = RB_MIN(ip6_msource_tree,
2783 &inm->in6m_srcs);
2784 }
2785 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2786 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2787 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2788 now = im6s_get_mode(inm, ims, 1);
2789 then = im6s_get_mode(inm, ims, 0);
2790 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
2791 __func__, then, now);
2792 if (now == then) {
2793 CTR1(KTR_MLD,
2794 "%s: skip unchanged", __func__);
2795 continue;
2796 }
2797 if (mode == MCAST_EXCLUDE &&
2798 now == MCAST_INCLUDE) {
2799 CTR1(KTR_MLD,
2800 "%s: skip IN src on EX group",
2801 __func__);
2802 continue;
2803 }
2804 nrt = (rectype_t)now;
2805 if (nrt == REC_NONE)
2806 nrt = (rectype_t)(~mode & REC_FULL);
2807 if (schanged++ == 0) {
2808 crt = nrt;
2809 } else if (crt != nrt)
2810 continue;
2811 if (!m_append(m, sizeof(struct in6_addr),
2812 (void *)&ims->im6s_addr)) {
2813 if (m != m0)
2814 m_freem(m);
2815 CTR1(KTR_MLD,
2816 "%s: m_append() failed", __func__);
2817 return (-ENOMEM);
2818 }
2819 nallow += !!(crt == REC_ALLOW);
2820 nblock += !!(crt == REC_BLOCK);
2821 if (++rsrcs == m0srcs)
2822 break;
2823 }
2824 /*
2825 * If we did not append any tree nodes on this
2826 * pass, back out of allocations.
2827 */
2828 if (rsrcs == 0) {
2829 npbytes -= sizeof(struct mldv2_record);
2830 if (m != m0) {
2831 CTR1(KTR_MLD,
2832 "%s: m_free(m)", __func__);
2833 m_freem(m);
2834 } else {
2835 CTR1(KTR_MLD,
2836 "%s: m_adj(m, -mr)", __func__);
2837 m_adj(m, -((int)sizeof(
2838 struct mldv2_record)));
2839 }
2840 continue;
2841 }
2842 npbytes += (rsrcs * sizeof(struct in6_addr));
2843 if (crt == REC_ALLOW)
2844 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2845 else if (crt == REC_BLOCK)
2846 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2847 pmr->mr_numsrc = htons(rsrcs);
2848 /*
2849 * Count the new group record, and enqueue this
2850 * packet if it wasn't already queued.
2851 */
2852 m->m_pkthdr.PH_vt.vt_nrecs++;
2853 if (m != m0)
2854 mbufq_enqueue(mq, m);
2855 nbytes += npbytes;
2856 } while (nims != NULL);
2857 drt |= crt;
2858 crt = (~crt & REC_FULL);
2859 }
2860
2861 CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2862 nallow, nblock);
2863
2864 return (nbytes);
2865 }
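/*
 * A worked example of the delta walk above: suppose an INCLUDE-mode
 * group has sources {A, B} at t0 and {A, C} at t1. A is unchanged
 * and skipped. C is in-mode only at t1, so it is emitted in an
 * ALLOW_NEW_SOURCES record on one pass. B dropped to UNDEFINED at
 * t1; its mode is inverted against the group's INCLUDE mode and it
 * is emitted in a BLOCK_OLD_SOURCES record on the other pass.
 */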
2866
2867 static int
2868 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
2869 {
2870 struct mbufq *gq;
2871 struct mbuf *m; /* pending state-change */
2872 struct mbuf *m0; /* copy of pending state-change */
2873 struct mbuf *mt; /* last state-change in packet */
2874 int docopy, domerge;
2875 u_int recslen;
2876
2877 docopy = 0;
2878 domerge = 0;
2879 recslen = 0;
2880
2881 IN6_MULTI_LOCK_ASSERT();
2882 MLD_LOCK_ASSERT();
2883
2884 /*
2885 * If there are further pending retransmissions, make a writable
2886 * copy of each queued state-change message before merging.
2887 */
2888 if (inm->in6m_scrv > 0)
2889 docopy = 1;
2890
2891 gq = &inm->in6m_scq;
2892 #ifdef KTR
2893 if (mbufq_first(gq) == NULL) {
2894 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
2895 __func__, inm);
2896 }
2897 #endif
2898
2899 m = mbufq_first(gq);
2900 while (m != NULL) {
2901 /*
2902 * Only merge the report into the current packet if
2903 * there is sufficient space to do so; an MLDv2 report
2904 		 * packet may contain at most 65,535 group records.
2905 		 * Always use a simple mbuf chain concatenation to do this,
2906 * as large state changes for single groups may have
2907 * allocated clusters.
2908 */
2909 domerge = 0;
2910 mt = mbufq_last(scq);
2911 if (mt != NULL) {
2912 recslen = m_length(m, NULL);
2913
2914 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
2915 m->m_pkthdr.PH_vt.vt_nrecs <=
2916 MLD_V2_REPORT_MAXRECS) &&
2917 (mt->m_pkthdr.len + recslen <=
2918 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
2919 domerge = 1;
2920 }
2921
2922 if (!domerge && mbufq_full(gq)) {
2923 CTR2(KTR_MLD,
2924 "%s: outbound queue full, skipping whole packet %p",
2925 __func__, m);
2926 mt = m->m_nextpkt;
2927 if (!docopy)
2928 m_freem(m);
2929 m = mt;
2930 continue;
2931 }
2932
2933 if (!docopy) {
2934 CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
2935 m0 = mbufq_dequeue(gq);
2936 m = m0->m_nextpkt;
2937 } else {
2938 CTR2(KTR_MLD, "%s: copying %p", __func__, m);
2939 m0 = m_dup(m, M_NOWAIT);
2940 if (m0 == NULL)
2941 return (ENOMEM);
2942 m0->m_nextpkt = NULL;
2943 m = m->m_nextpkt;
2944 }
2945
2946 if (!domerge) {
2947 			CTR3(KTR_MLD, "%s: queueing %p to scq %p",
2948 __func__, m0, scq);
2949 mbufq_enqueue(scq, m0);
2950 } else {
2951 struct mbuf *mtl; /* last mbuf of packet mt */
2952
2953 			CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p",
2954 __func__, m0, mt);
2955
2956 mtl = m_last(mt);
2957 m0->m_flags &= ~M_PKTHDR;
2958 mt->m_pkthdr.len += recslen;
2959 mt->m_pkthdr.PH_vt.vt_nrecs +=
2960 m0->m_pkthdr.PH_vt.vt_nrecs;
2961
2962 mtl->m_next = m0;
2963 }
2964 }
2965
2966 return (0);
2967 }
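/*
 * Note on the merge criterion above (illustrative; reuses the 1500
 * byte MTU and ~56 bytes of IPv6+RA+ICMPv6 overhead assumed in the
 * sizing example earlier): a pending report merges into the tail
 * packet only while the combined payload stays under ~1444 bytes
 * and the combined record count stays within MLD_V2_REPORT_MAXRECS.
 */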
2968
2969 /*
2970 * Respond to a pending MLDv2 General Query.
2971 */
2972 static void
2973 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
2974 {
2975 struct ifmultiaddr *ifma;
2976 struct ifnet *ifp;
2977 struct in6_multi *inm;
2978 int retval;
2979
2980 IN6_MULTI_LOCK_ASSERT();
2981 MLD_LOCK_ASSERT();
2982
2983 KASSERT(mli->mli_version == MLD_VERSION_2,
2984 ("%s: called when version %d", __func__, mli->mli_version));
2985
2986 /*
2987 	 * Check whether there are already packets queued. If so, send them
2988 	 * first. For a large number of groups, the reply to a general query
2989 	 * can take many packets; we should finish sending them before we
2990 	 * start queuing the new reply.
2991 */
2992 if (mbufq_len(&mli->mli_gq) != 0)
2993 goto send;
2994
2995 ifp = mli->mli_ifp;
2996
2997 IF_ADDR_RLOCK(ifp);
2998 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2999 if (ifma->ifma_addr->sa_family != AF_INET6 ||
3000 ifma->ifma_protospec == NULL)
3001 continue;
3002
3003 inm = (struct in6_multi *)ifma->ifma_protospec;
3004 KASSERT(ifp == inm->in6m_ifp,
3005 ("%s: inconsistent ifp", __func__));
3006
3007 switch (inm->in6m_state) {
3008 case MLD_NOT_MEMBER:
3009 case MLD_SILENT_MEMBER:
3010 break;
3011 case MLD_REPORTING_MEMBER:
3012 case MLD_IDLE_MEMBER:
3013 case MLD_LAZY_MEMBER:
3014 case MLD_SLEEPING_MEMBER:
3015 case MLD_AWAKENING_MEMBER:
3016 inm->in6m_state = MLD_REPORTING_MEMBER;
3017 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3018 inm, 0, 0, 0, 0);
3019 CTR2(KTR_MLD, "%s: enqueue record = %d",
3020 __func__, retval);
3021 break;
3022 case MLD_G_QUERY_PENDING_MEMBER:
3023 case MLD_SG_QUERY_PENDING_MEMBER:
3024 case MLD_LEAVING_MEMBER:
3025 break;
3026 }
3027 }
3028 IF_ADDR_RUNLOCK(ifp);
3029
3030 send:
3031 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3032
3033 /*
3034 * Slew transmission of bursts over 500ms intervals.
3035 */
3036 if (mbufq_first(&mli->mli_gq) != NULL) {
3037 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3038 MLD_RESPONSE_BURST_INTERVAL);
3039 V_interface_timers_running6 = 1;
3040 }
3041 }
3042
3043 /*
3044 * Transmit the next pending message in the output queue.
3045 *
3046 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3047 * MRT: Nothing needs to be done, as MLD traffic is always local to
3048 * a link and uses a link-scope multicast address.
3049 */
3050 static void
3051 mld_dispatch_packet(struct mbuf *m)
3052 {
3053 struct ip6_moptions im6o;
3054 struct ifnet *ifp;
3055 struct ifnet *oifp;
3056 struct mbuf *m0;
3057 struct mbuf *md;
3058 struct ip6_hdr *ip6;
3059 struct mld_hdr *mld;
3060 int error;
3061 int off;
3062 int type;
3063 uint32_t ifindex;
3064
3065 CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
3066
3067 /*
3068 * Set VNET image pointer from enqueued mbuf chain
3069 * before doing anything else. Whilst we use interface
3070 * indexes to guard against interface detach, they are
3071 * unique to each VIMAGE and must be retrieved.
3072 */
3073 ifindex = mld_restore_context(m);
3074
3075 /*
3076 * Check if the ifnet still exists. This limits the scope of
3077 * any race in the absence of a global ifp lock for low cost
3078 * (an array lookup).
3079 */
3080 ifp = ifnet_byindex(ifindex);
3081 if (ifp == NULL) {
3082 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
3083 __func__, m, ifindex);
3084 m_freem(m);
3085 IP6STAT_INC(ip6s_noroute);
3086 goto out;
3087 }
3088
3089 im6o.im6o_multicast_hlim = 1;
3090 im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
3091 im6o.im6o_multicast_ifp = ifp;
3092
3093 if (m->m_flags & M_MLDV1) {
3094 m0 = m;
3095 } else {
3096 m0 = mld_v2_encap_report(ifp, m);
3097 if (m0 == NULL) {
3098 CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
3099 IP6STAT_INC(ip6s_odropped);
3100 goto out;
3101 }
3102 }
3103
3104 mld_scrub_context(m0);
3105 m_clrprotoflags(m);
3106 m0->m_pkthdr.rcvif = V_loif;
3107
3108 ip6 = mtod(m0, struct ip6_hdr *);
3109 #if 0
3110 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
3111 #else
3112 /*
3113 * XXX XXX Break some KPI rules to prevent an LOR which would
3114 * occur if we called in6_setscope() at transmission.
3115 * See comments at top of file.
3116 */
3117 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3118 #endif
3119
3120 /*
3121 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3122 * so we can bump the stats.
3123 */
3124 md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3125 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3126 type = mld->mld_type;
3127
3128 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
3129 &oifp, NULL);
3130 if (error) {
3131 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
3132 goto out;
3133 }
3134 ICMP6STAT_INC(icp6s_outhist[type]);
3135 if (oifp != NULL) {
3136 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3137 switch (type) {
3138 case MLD_LISTENER_REPORT:
3139 case MLDV2_LISTENER_REPORT:
3140 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3141 break;
3142 case MLD_LISTENER_DONE:
3143 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3144 break;
3145 }
3146 }
3147 out:
3148 return;
3149 }
3150
3151 /*
3152 * Encapsulate an MLDv2 report.
3153 *
3154 * KAME IPv6 requires that hop-by-hop options be passed separately,
3155 * and that the IPv6 header be prepended in a separate mbuf.
3156 *
3157 * Returns a pointer to the new mbuf chain head, or NULL if the
3158 * allocation failed.
3159 */
3160 static struct mbuf *
3161 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3162 {
3163 struct mbuf *mh;
3164 struct mldv2_report *mld;
3165 struct ip6_hdr *ip6;
3166 struct in6_ifaddr *ia;
3167 int mldreclen;
3168
3169 KASSERT(ifp != NULL, ("%s: null ifp", __func__));
3170 KASSERT((m->m_flags & M_PKTHDR),
3171 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3172
3173 /*
3174 * RFC3590: OK to send as :: or tentative during DAD.
3175 */
3176 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3177 if (ia == NULL)
3178 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
3179
3180 mh = m_gethdr(M_NOWAIT, MT_DATA);
3181 if (mh == NULL) {
3182 if (ia != NULL)
3183 ifa_free(&ia->ia_ifa);
3184 m_freem(m);
3185 return (NULL);
3186 }
3187 M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3188
3189 mldreclen = m_length(m, NULL);
3190 CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
3191
3192 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3193 mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3194 sizeof(struct mldv2_report) + mldreclen;
3195
3196 ip6 = mtod(mh, struct ip6_hdr *);
3197 ip6->ip6_flow = 0;
3198 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3199 ip6->ip6_vfc |= IPV6_VERSION;
3200 ip6->ip6_nxt = IPPROTO_ICMPV6;
3201 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3202 if (ia != NULL)
3203 ifa_free(&ia->ia_ifa);
3204 ip6->ip6_dst = in6addr_linklocal_allv2routers;
3205 /* scope ID will be set in netisr */
3206
3207 mld = (struct mldv2_report *)(ip6 + 1);
3208 mld->mld_type = MLDV2_LISTENER_REPORT;
3209 mld->mld_code = 0;
3210 mld->mld_cksum = 0;
3211 mld->mld_v2_reserved = 0;
3212 mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3213 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3214
3215 mh->m_next = m;
3216 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3217 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3218 return (mh);
3219 }
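/*
 * Illustration of the resulting chain (the hop-by-hop router-alert
 * option is not built here; it is supplied to ip6_output() separately
 * via mld_po, per the KAME note above):
 *
 *	mh: [ ip6_hdr (40) | mldv2_report (8) ]
 *	m:  [ group record | sources ... ][ group record | ... ] ...
 *
 * The ICMPv6 checksum is computed over the 8 byte report header
 * plus all chained group records.
 */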
3220
3221 #ifdef KTR
3222 static char *
3223 mld_rec_type_to_str(const int type)
3224 {
3225
3226 switch (type) {
3227 case MLD_CHANGE_TO_EXCLUDE_MODE:
3228 return "TO_EX";
3229 break;
3230 case MLD_CHANGE_TO_INCLUDE_MODE:
3231 return "TO_IN";
3232 break;
3233 case MLD_MODE_IS_EXCLUDE:
3234 return "MODE_EX";
3235 break;
3236 case MLD_MODE_IS_INCLUDE:
3237 return "MODE_IN";
3238 break;
3239 case MLD_ALLOW_NEW_SOURCES:
3240 return "ALLOW_NEW";
3241 break;
3242 case MLD_BLOCK_OLD_SOURCES:
3243 return "BLOCK_OLD";
3244 break;
3245 default:
3246 break;
3247 }
3248 return "unknown";
3249 }
3250 #endif
3251
3252 static void
3253 mld_init(void *unused __unused)
3254 {
3255
3256 CTR1(KTR_MLD, "%s: initializing", __func__);
3257 MLD_LOCK_INIT();
3258
3259 ip6_initpktopts(&mld_po);
3260 mld_po.ip6po_hlim = 1;
3261 mld_po.ip6po_hbh = &mld_ra.hbh;
3262 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3263 mld_po.ip6po_flags = IP6PO_DONTFRAG;
3264 }
3265 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
3266
3267 static void
3268 mld_uninit(void *unused __unused)
3269 {
3270
3271 CTR1(KTR_MLD, "%s: tearing down", __func__);
3272 MLD_LOCK_DESTROY();
3273 }
3274 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
3275
3276 static void
3277 vnet_mld_init(const void *unused __unused)
3278 {
3279
3280 CTR1(KTR_MLD, "%s: initializing", __func__);
3281
3282 LIST_INIT(&V_mli_head);
3283 }
3284 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
3285 NULL);
3286
3287 static void
3288 vnet_mld_uninit(const void *unused __unused)
3289 {
3290
3291 	/* This can happen if we shut down the network stack. */
3292 CTR1(KTR_MLD, "%s: tearing down", __func__);
3293 }
3294 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
3295 NULL);
3296
3297 static int
3298 mld_modevent(module_t mod, int type, void *unused __unused)
3299 {
3300
3301 switch (type) {
3302 case MOD_LOAD:
3303 case MOD_UNLOAD:
3304 break;
3305 default:
3306 return (EOPNOTSUPP);
3307 }
3308 return (0);
3309 }
3310
3311 static moduledata_t mld_mod = {
3312 "mld",
3313 mld_modevent,
3314 0
3315 };
3316 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);