sys/netinet6/mld6.c
1 /*-
2 * Copyright (c) 2009 Bruce Simpson.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote
13 * products derived from this software without specific prior written
14 * permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
29 */
30
31 /*-
32 * Copyright (c) 1988 Stephen Deering.
33 * Copyright (c) 1992, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * Stephen Deering of Stanford University.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
64 */
65
66 #include <sys/cdefs.h>
67 __FBSDID("$FreeBSD$");
68
69 #include "opt_inet.h"
70 #include "opt_inet6.h"
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/mbuf.h>
75 #include <sys/socket.h>
76 #include <sys/protosw.h>
77 #include <sys/sysctl.h>
78 #include <sys/kernel.h>
79 #include <sys/callout.h>
80 #include <sys/malloc.h>
81 #include <sys/module.h>
82 #include <sys/ktr.h>
83
84 #include <net/if.h>
85 #include <net/if_var.h>
86 #include <net/route.h>
87 #include <net/vnet.h>
88
89 #include <netinet/in.h>
90 #include <netinet/in_var.h>
91 #include <netinet6/in6_var.h>
92 #include <netinet/ip6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet6/scope6_var.h>
95 #include <netinet/icmp6.h>
96 #include <netinet6/mld6.h>
97 #include <netinet6/mld6_var.h>
98
99 #include <security/mac/mac_framework.h>
100
101 #ifndef KTR_MLD
102 #define KTR_MLD KTR_INET6
103 #endif
104
105 static struct mld_ifsoftc *
106 mli_alloc_locked(struct ifnet *);
107 static void mli_delete_locked(const struct ifnet *);
108 static void mld_dispatch_packet(struct mbuf *);
109 static void mld_dispatch_queue(struct mbufq *, int);
110 static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
111 static void mld_fasttimo_vnet(void);
112 static int mld_handle_state_change(struct in6_multi *,
113 struct mld_ifsoftc *);
114 static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
115 const int);
116 #ifdef KTR
117 static char * mld_rec_type_to_str(const int);
118 #endif
119 static void mld_set_version(struct mld_ifsoftc *, const int);
120 static void mld_slowtimo_vnet(void);
121 static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
122 /*const*/ struct mld_hdr *);
123 static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
124 /*const*/ struct mld_hdr *);
125 static void mld_v1_process_group_timer(struct mld_ifsoftc *,
126 struct in6_multi *);
127 static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
128 static int mld_v1_transmit_report(struct in6_multi *, const int);
129 static void mld_v1_update_group(struct in6_multi *, const int);
130 static void mld_v2_cancel_link_timers(struct mld_ifsoftc *);
131 static void mld_v2_dispatch_general_query(struct mld_ifsoftc *);
132 static struct mbuf *
133 mld_v2_encap_report(struct ifnet *, struct mbuf *);
134 static int mld_v2_enqueue_filter_change(struct mbufq *,
135 struct in6_multi *);
136 static int mld_v2_enqueue_group_record(struct mbufq *,
137 struct in6_multi *, const int, const int, const int,
138 const int);
139 static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
140 struct mbuf *, struct mldv2_query *, const int, const int);
141 static int mld_v2_merge_state_changes(struct in6_multi *,
142 struct mbufq *);
143 static void mld_v2_process_group_timers(struct mld_ifsoftc *,
144 struct mbufq *, struct mbufq *,
145 struct in6_multi *, const int);
146 static int mld_v2_process_group_query(struct in6_multi *,
147 struct mld_ifsoftc *mli, int, struct mbuf *,
148 struct mldv2_query *, const int);
149 static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
150 static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
151
152 /*
153 * Normative references: RFC 2710, RFC 3590, RFC 3810.
154 *
155 * Locking:
156 * * The MLD subsystem lock ends up being system-wide for the moment,
157 * but could be per-VIMAGE later on.
158 * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
159 * Any may be taken independently; if any are held at the same
160 * time, the above lock order must be followed.
161 * * IN6_MULTI_LOCK covers in_multi.
162 * * MLD_LOCK covers per-link state and any global variables in this file.
163 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
164 * per-link state iterators.
165 *
166 * XXX LOR PREVENTION
167 * A special case for IPv6 is the in6_setscope() routine. ip6_output()
168 * will not accept an ifp; it wants an embedded scope ID, unlike
169 * ip_output(), which happily takes the ifp given to it. The embedded
170 * scope ID is only used by MLD to select the outgoing interface.
171 *
172 * During interface attach and detach, MLD will take MLD_LOCK *after*
173 * the IF_AFDATA_LOCK.
174 * As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
175 * it with MLD_LOCK held without triggering an LOR. A netisr with indirect
176 * dispatch could work around this, but we'd rather not do that, as it
177 * can introduce other races.
178 *
179 * As such, we exploit the fact that the scope ID is just the interface
180 * index, and embed it in the IPv6 destination address accordingly.
181 * This is potentially NOT VALID for MLDv1 reports, as they
182 * are always sent to the multicast group itself; as MLDv2
183 * reports are always sent to ff02::16, this is not an issue
184 * when MLDv2 is in use.
185 *
186 * This does not however eliminate the LOR when ip6_output() itself
187 * calls in6_setscope() internally whilst MLD_LOCK is held. This will
188 * trigger a LOR warning in WITNESS when the ifnet is detached.
189 *
190 * The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
191 * how it's used across the network stack. Here we're simply exploiting
192 * the fact that MLD runs at a similar layer in the stack to scope6.c.
193 *
194 * VIMAGE:
195 * * Each in6_multi corresponds to an ifp, and each ifp corresponds
196 * to a vnet in ifp->if_vnet.
197 */
198 static struct mtx mld_mtx;
199 static MALLOC_DEFINE(M_MLD, "mld", "mld state");
200
201 #define MLD_EMBEDSCOPE(pin6, zoneid) \
202 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
203 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
204 	(pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)
205
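/*
 * Example (sketch): with a link- or interface-local group and the
 * outgoing interface's index as the zone ID, the macro above stores
 * the zone in the second 16-bit word of the address; ip6_output()
 * then uses it to select the outgoing interface (see the LOR
 * discussion above):
 *
 *	struct in6_addr dst = in6addr_linklocal_allnodes;
 *
 *	MLD_EMBEDSCOPE(&dst, ifp->if_index);
 *	(dst.s6_addr16[1] now holds htons(ifp->if_index).)
 */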
206 /*
207 * VIMAGE-wide globals.
208 */
209 static VNET_DEFINE(struct timeval, mld_gsrdelay) = {10, 0};
210 static VNET_DEFINE(LIST_HEAD(, mld_ifsoftc), mli_head);
211 static VNET_DEFINE(int, interface_timers_running6);
212 static VNET_DEFINE(int, state_change_timers_running6);
213 static VNET_DEFINE(int, current_state_timers_running6);
214
215 #define V_mld_gsrdelay VNET(mld_gsrdelay)
216 #define V_mli_head VNET(mli_head)
217 #define V_interface_timers_running6 VNET(interface_timers_running6)
218 #define V_state_change_timers_running6 VNET(state_change_timers_running6)
219 #define V_current_state_timers_running6 VNET(current_state_timers_running6)
220
221 SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
222
223 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
224 "IPv6 Multicast Listener Discovery");
225
226 /*
227 * Virtualized sysctls.
228 */
229 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
230 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
231 &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
232 "Rate limit for MLDv2 Group-and-Source queries in seconds");
233
234 /*
235 * Non-virtualized sysctls.
236 */
237 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
238 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
239 "Per-interface MLDv2 state");
240
241 static int mld_v1enable = 1;
242 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN,
243 &mld_v1enable, 0, "Enable fallback to MLDv1");
244
245 static int mld_use_allow = 1;
246 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
247 &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
248
249 /*
250 * Packed Router Alert option structure declaration.
251 */
252 struct mld_raopt {
253 struct ip6_hbh hbh;
254 struct ip6_opt pad;
255 struct ip6_opt_router ra;
256 } __packed;
257
258 /*
259 * Router Alert hop-by-hop option header.
260 */
261 static struct mld_raopt mld_ra = {
262 .hbh = { 0, 0 },
263 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
264 .ra = {
265 .ip6or_type = IP6OPT_ROUTER_ALERT,
266 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
267 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
268 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
269 }
270 };
271 static struct ip6_pktopts mld_po;
272
273 static __inline void
274 mld_save_context(struct mbuf *m, struct ifnet *ifp)
275 {
276
277 #ifdef VIMAGE
278 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
279 #endif /* VIMAGE */
280 m->m_pkthdr.rcvif = ifp;
281 m->m_pkthdr.flowid = ifp->if_index;
282 }
283
284 static __inline void
285 mld_scrub_context(struct mbuf *m)
286 {
287
288 m->m_pkthdr.PH_loc.ptr = NULL;
289 m->m_pkthdr.flowid = 0;
290 }
291
292 /*
293 * Restore context from a queued output chain.
294 * Return saved ifindex.
295 *
296 * VIMAGE: The assertion is there to make sure that we
297 * actually called CURVNET_SET() with what's in the mbuf chain.
298 */
299 static __inline uint32_t
300 mld_restore_context(struct mbuf *m)
301 {
302
303 #if defined(VIMAGE) && defined(INVARIANTS)
304 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
305 	    ("%s: called when curvnet was not restored: curvnet %p m ptr %p",
306 __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
307 #endif
308 return (m->m_pkthdr.flowid);
309 }
310
311 /*
312 * Retrieve or set threshold between group-source queries in seconds.
313 *
314 * VIMAGE: Assume curvnet set by caller.
315 * SMPng: NOTE: Serialized by MLD lock.
316 */
317 static int
318 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
319 {
320 int error;
321 int i;
322
323 error = sysctl_wire_old_buffer(req, sizeof(int));
324 if (error)
325 return (error);
326
327 MLD_LOCK();
328
329 i = V_mld_gsrdelay.tv_sec;
330
331 error = sysctl_handle_int(oidp, &i, 0, req);
332 if (error || !req->newptr)
333 goto out_locked;
334
335 if (i < -1 || i >= 60) {
336 error = EINVAL;
337 goto out_locked;
338 }
339
340 CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
341 V_mld_gsrdelay.tv_sec, i);
342 V_mld_gsrdelay.tv_sec = i;
343
344 out_locked:
345 MLD_UNLOCK();
346 return (error);
347 }
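/*
 * Example (sketch): the OID registered above is
 * net.inet6.mld.gsrdelay, so from userland:
 *
 *	sysctl net.inet6.mld.gsrdelay=10
 *
 * sets the Group-and-Source query rate limit to 10 seconds; the
 * handler accepts values in the range [-1, 59].
 */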
348
349 /*
350 * Expose struct mld_ifsoftc to userland, keyed by ifindex.
351 * For use by ifmcstat(8).
352 *
353 * SMPng: NOTE: Does an unlocked ifindex space read.
354 * VIMAGE: Assume curvnet set by caller. The node handler itself
355 * is not directly virtualized.
356 */
357 static int
358 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
359 {
360 int *name;
361 int error;
362 u_int namelen;
363 struct ifnet *ifp;
364 struct mld_ifsoftc *mli;
365
366 name = (int *)arg1;
367 namelen = arg2;
368
369 if (req->newptr != NULL)
370 return (EPERM);
371
372 if (namelen != 1)
373 return (EINVAL);
374
375 error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
376 if (error)
377 return (error);
378
379 IN6_MULTI_LOCK();
380 MLD_LOCK();
381
382 if (name[0] <= 0 || name[0] > V_if_index) {
383 error = ENOENT;
384 goto out_locked;
385 }
386
387 error = ENOENT;
388
389 ifp = ifnet_byindex(name[0]);
390 if (ifp == NULL)
391 goto out_locked;
392
393 LIST_FOREACH(mli, &V_mli_head, mli_link) {
394 if (ifp == mli->mli_ifp) {
395 struct mld_ifinfo info;
396
397 info.mli_version = mli->mli_version;
398 info.mli_v1_timer = mli->mli_v1_timer;
399 info.mli_v2_timer = mli->mli_v2_timer;
400 info.mli_flags = mli->mli_flags;
401 info.mli_rv = mli->mli_rv;
402 info.mli_qi = mli->mli_qi;
403 info.mli_qri = mli->mli_qri;
404 info.mli_uri = mli->mli_uri;
405 error = SYSCTL_OUT(req, &info, sizeof(info));
406 break;
407 }
408 }
409
410 out_locked:
411 MLD_UNLOCK();
412 IN6_MULTI_UNLOCK();
413 return (error);
414 }
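/*
 * Example (sketch): ifmcstat(8) reads this node one ifindex at a
 * time, i.e. net.inet6.mld.ifinfo.<ifindex>; writes are rejected
 * with EPERM above.  Assuming the stock tool:
 *
 *	ifmcstat -i em0 -f inet6
 *
 * displays the per-interface MLD state exported here.
 */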
415
416 /*
417 * Dispatch an entire queue of pending packet chains.
418 * VIMAGE: Assumes the vnet pointer has been set.
419 */
420 static void
421 mld_dispatch_queue(struct mbufq *mq, int limit)
422 {
423 struct mbuf *m;
424
425 while ((m = mbufq_dequeue(mq)) != NULL) {
426 		CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, mq);
427 mld_dispatch_packet(m);
428 if (--limit == 0)
429 break;
430 }
431 }
432
433 /*
434 * Filter outgoing MLD report state by group.
435 *
436 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
437 * and node-local addresses. However, kernel and socket consumers
438 * always embed the KAME scope ID in the address provided, so strip it
439 * when performing comparison.
440 * Note: This is not the same as the *multicast* scope.
441 *
442 * Return zero if the given group is one for which MLD reports
443 * should be suppressed, or non-zero if reports should be issued.
444 */
445 static __inline int
446 mld_is_addr_reported(const struct in6_addr *addr)
447 {
448
449 KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
450
451 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
452 return (0);
453
454 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
455 struct in6_addr tmp = *addr;
456 in6_clearscope(&tmp);
457 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
458 return (0);
459 }
460
461 return (1);
462 }
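/*
 * Examples of the policy above: ff01::1 (interface-local all-nodes)
 * and ff02::1 (link-local all-nodes) are never reported, while
 * ff02::1:ff00:1 (a solicited-node group) and ff05::2 (site-local
 * all-routers) are reported as usual.
 */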
463
464 /*
465 * Attach MLD when PF_INET6 is attached to an interface.
466 *
467 * SMPng: Normally called with IF_AFDATA_LOCK held.
468 */
469 struct mld_ifsoftc *
470 mld_domifattach(struct ifnet *ifp)
471 {
472 struct mld_ifsoftc *mli;
473
474 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
475 __func__, ifp, if_name(ifp));
476
477 MLD_LOCK();
478
479 mli = mli_alloc_locked(ifp);
480 if (!(ifp->if_flags & IFF_MULTICAST))
481 mli->mli_flags |= MLIF_SILENT;
482 if (mld_use_allow)
483 mli->mli_flags |= MLIF_USEALLOW;
484
485 MLD_UNLOCK();
486
487 return (mli);
488 }
489
490 /*
491 * VIMAGE: assume curvnet set by caller.
492 */
493 static struct mld_ifsoftc *
494 mli_alloc_locked(/*const*/ struct ifnet *ifp)
495 {
496 struct mld_ifsoftc *mli;
497
498 MLD_LOCK_ASSERT();
499
500 mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
501 if (mli == NULL)
502 goto out;
503
504 mli->mli_ifp = ifp;
505 mli->mli_version = MLD_VERSION_2;
506 mli->mli_flags = 0;
507 mli->mli_rv = MLD_RV_INIT;
508 mli->mli_qi = MLD_QI_INIT;
509 mli->mli_qri = MLD_QRI_INIT;
510 mli->mli_uri = MLD_URI_INIT;
511 SLIST_INIT(&mli->mli_relinmhead);
512 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
513
514 LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
515
516 CTR2(KTR_MLD, "allocate mld_ifsoftc for ifp %p(%s)",
517 ifp, if_name(ifp));
518
519 out:
520 return (mli);
521 }
522
523 /*
524 * Hook for ifdetach.
525 *
526 * NOTE: Some finalization tasks need to run before the protocol domain
527 * is detached, but also before the link layer does its cleanup.
528  * Runs before link-layer cleanup; cleans up groups, but does not free
529  * MLD state.
529 *
530 * SMPng: Caller must hold IN6_MULTI_LOCK().
531 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
532 * XXX This routine is also bitten by unlocked ifma_protospec access.
533 */
534 void
535 mld_ifdetach(struct ifnet *ifp)
536 {
537 struct mld_ifsoftc *mli;
538 struct ifmultiaddr *ifma;
539 struct in6_multi *inm, *tinm;
540
541 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
542 if_name(ifp));
543
544 IN6_MULTI_LOCK_ASSERT();
545 MLD_LOCK();
546
547 mli = MLD_IFINFO(ifp);
548 if (mli->mli_version == MLD_VERSION_2) {
549 IF_ADDR_RLOCK(ifp);
550 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
551 if (ifma->ifma_addr->sa_family != AF_INET6 ||
552 ifma->ifma_protospec == NULL)
553 continue;
554 inm = (struct in6_multi *)ifma->ifma_protospec;
555 if (inm->in6m_state == MLD_LEAVING_MEMBER) {
556 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
557 inm, in6m_nrele);
558 }
559 in6m_clear_recorded(inm);
560 }
561 IF_ADDR_RUNLOCK(ifp);
562 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
563 tinm) {
564 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
565 in6m_release_locked(inm);
566 }
567 }
568
569 MLD_UNLOCK();
570 }
571
572 /*
573 * Hook for domifdetach.
574  * Runs after link-layer cleanup; frees MLD state.
575 *
576 * SMPng: Normally called with IF_AFDATA_LOCK held.
577 */
578 void
579 mld_domifdetach(struct ifnet *ifp)
580 {
581
582 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
583 __func__, ifp, if_name(ifp));
584
585 MLD_LOCK();
586 mli_delete_locked(ifp);
587 MLD_UNLOCK();
588 }
589
590 static void
591 mli_delete_locked(const struct ifnet *ifp)
592 {
593 struct mld_ifsoftc *mli, *tmli;
594
595 CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
596 __func__, ifp, if_name(ifp));
597
598 MLD_LOCK_ASSERT();
599
600 LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
601 if (mli->mli_ifp == ifp) {
602 /*
603 * Free deferred General Query responses.
604 */
605 mbufq_drain(&mli->mli_gq);
606
607 LIST_REMOVE(mli, mli_link);
608
609 KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
610 ("%s: there are dangling in_multi references",
611 __func__));
612
613 free(mli, M_MLD);
614 return;
615 }
616 }
617 }
618
619 /*
620 * Process a received MLDv1 general or address-specific query.
621 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
622 *
623 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
624 * mld_addr. This is OK as we own the mbuf chain.
625 */
626 static int
627 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
628 /*const*/ struct mld_hdr *mld)
629 {
630 struct ifmultiaddr *ifma;
631 struct mld_ifsoftc *mli;
632 struct in6_multi *inm;
633 int is_general_query;
634 uint16_t timer;
635 #ifdef KTR
636 char ip6tbuf[INET6_ADDRSTRLEN];
637 #endif
638
639 is_general_query = 0;
640
641 if (!mld_v1enable) {
642 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
643 ip6_sprintf(ip6tbuf, &mld->mld_addr),
644 ifp, if_name(ifp));
645 return (0);
646 }
647
648 /*
649 * RFC3810 Section 6.2: MLD queries must originate from
650 * a router's link-local address.
651 */
652 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
653 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
654 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
655 ifp, if_name(ifp));
656 return (0);
657 }
658
659 /*
660 * Do address field validation upfront before we accept
661 * the query.
662 */
663 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
664 /*
665 * MLDv1 General Query.
666 * If this was not sent to the all-nodes group, ignore it.
667 */
668 struct in6_addr dst;
669
670 dst = ip6->ip6_dst;
671 in6_clearscope(&dst);
672 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
673 return (EINVAL);
674 is_general_query = 1;
675 } else {
676 /*
677 * Embed scope ID of receiving interface in MLD query for
678 * lookup whilst we don't hold other locks.
679 */
680 in6_setscope(&mld->mld_addr, ifp, NULL);
681 }
682
683 IN6_MULTI_LOCK();
684 MLD_LOCK();
685
686 /*
687 * Switch to MLDv1 host compatibility mode.
688 */
689 mli = MLD_IFINFO(ifp);
690 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
691 mld_set_version(mli, MLD_VERSION_1);
692
693 timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
694 if (timer == 0)
695 timer = 1;
696
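	/*
	 * Example (sketch): mld_maxdelay is carried in milliseconds,
	 * so assuming the stock PR_FASTHZ == 5 and
	 * MLD_TIMER_SCALE == 1000, an advertised maximum delay of
	 * 10000 ms yields 10000 * 5 / 1000 == 50 fast ticks (10 s).
	 */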
697 IF_ADDR_RLOCK(ifp);
698 if (is_general_query) {
699 /*
700 * For each reporting group joined on this
701 * interface, kick the report timer.
702 */
703 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
704 ifp, if_name(ifp));
705 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
706 if (ifma->ifma_addr->sa_family != AF_INET6 ||
707 ifma->ifma_protospec == NULL)
708 continue;
709 inm = (struct in6_multi *)ifma->ifma_protospec;
710 mld_v1_update_group(inm, timer);
711 }
712 } else {
713 /*
714 * MLDv1 Group-Specific Query.
715 * If this is a group-specific MLDv1 query, we need only
716 * look up the single group to process it.
717 */
718 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
719 if (inm != NULL) {
720 CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
721 ip6_sprintf(ip6tbuf, &mld->mld_addr),
722 ifp, if_name(ifp));
723 mld_v1_update_group(inm, timer);
724 }
725 /* XXX Clear embedded scope ID as userland won't expect it. */
726 in6_clearscope(&mld->mld_addr);
727 }
728
729 IF_ADDR_RUNLOCK(ifp);
730 MLD_UNLOCK();
731 IN6_MULTI_UNLOCK();
732
733 return (0);
734 }
735
736 /*
737 * Update the report timer on a group in response to an MLDv1 query.
738 *
739 * If we are becoming the reporting member for this group, start the timer.
740 * If we already are the reporting member for this group, and timer is
741 * below the threshold, reset it.
742 *
743 * We may be updating the group for the first time since we switched
744 * to MLDv2. If we are, then we must clear any recorded source lists,
745 * and transition to REPORTING state; the group timer is overloaded
746 * for group and group-source query responses.
747 *
748 * Unlike MLDv2, the delay per group should be jittered
749 * to avoid bursts of MLDv1 reports.
750 */
751 static void
752 mld_v1_update_group(struct in6_multi *inm, const int timer)
753 {
754 #ifdef KTR
755 char ip6tbuf[INET6_ADDRSTRLEN];
756 #endif
757
758 CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
759 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
760 if_name(inm->in6m_ifp), timer);
761
762 IN6_MULTI_LOCK_ASSERT();
763
764 switch (inm->in6m_state) {
765 case MLD_NOT_MEMBER:
766 case MLD_SILENT_MEMBER:
767 break;
768 case MLD_REPORTING_MEMBER:
769 if (inm->in6m_timer != 0 &&
770 inm->in6m_timer <= timer) {
771 CTR1(KTR_MLD, "%s: REPORTING and timer running, "
772 "skipping.", __func__);
773 break;
774 }
775 /* FALLTHROUGH */
776 case MLD_SG_QUERY_PENDING_MEMBER:
777 case MLD_G_QUERY_PENDING_MEMBER:
778 case MLD_IDLE_MEMBER:
779 case MLD_LAZY_MEMBER:
780 case MLD_AWAKENING_MEMBER:
781 CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
782 inm->in6m_state = MLD_REPORTING_MEMBER;
783 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
784 V_current_state_timers_running6 = 1;
785 break;
786 case MLD_SLEEPING_MEMBER:
787 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
788 inm->in6m_state = MLD_AWAKENING_MEMBER;
789 break;
790 case MLD_LEAVING_MEMBER:
791 break;
792 }
793 }
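/*
 * A note on the jitter used above (sketch): MLD_RANDOM_DELAY() is
 * defined in mld6_var.h along the lines of (arc4random() % (X) + 1),
 * i.e. a uniform delay in [1, timer] fast ticks, so hosts sharing a
 * link spread their MLDv1 reports instead of answering a query in
 * the same tick.
 */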
794
795 /*
796 * Process a received MLDv2 general, group-specific or
797 * group-and-source-specific query.
798 *
799 * Assumes that mld points to a struct mldv2_query which is stored in
800 * contiguous memory.
801 *
802 * Return 0 if successful, otherwise an appropriate error code is returned.
803 */
804 static int
805 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
806 struct mbuf *m, struct mldv2_query *mld, const int off, const int icmp6len)
807 {
808 struct mld_ifsoftc *mli;
809 struct in6_multi *inm;
810 uint32_t maxdelay, nsrc, qqi;
811 int is_general_query;
812 uint16_t timer;
813 uint8_t qrv;
814 #ifdef KTR
815 char ip6tbuf[INET6_ADDRSTRLEN];
816 #endif
817
818 is_general_query = 0;
819
820 /*
821 * RFC3810 Section 6.2: MLD queries must originate from
822 * a router's link-local address.
823 */
824 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
825 		CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
826 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
827 ifp, if_name(ifp));
828 return (0);
829 }
830
831 CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
832
833 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
834 if (maxdelay >= 32768) {
835 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
836 (MLD_MRC_EXP(maxdelay) + 3);
837 }
838 timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
839 if (timer == 0)
840 timer = 1;
841
842 qrv = MLD_QRV(mld->mld_misc);
843 if (qrv < 2) {
844 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
845 qrv, MLD_RV_INIT);
846 qrv = MLD_RV_INIT;
847 }
848
849 qqi = mld->mld_qqi;
850 if (qqi >= 128) {
851 qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
852 (MLD_QQIC_EXP(mld->mld_qqi) + 3);
853 }
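	/*
	 * Worked example of the two decodings above, per the RFC 3810
	 * floating-point encodings (sketch): mld_maxdelay == 0x8400
	 * has mant 0x400 and exp 0, so maxdelay becomes
	 * (0x400 | 0x1000) << 3 == 40960 ms; mld_qqi == 0x80 has
	 * mant 0 and exp 0, so qqi becomes (0 | 0x10) << 3 == 128 s.
	 */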
854
855 nsrc = ntohs(mld->mld_numsrc);
856 if (nsrc > MLD_MAX_GS_SOURCES)
857 return (EMSGSIZE);
858 if (icmp6len < sizeof(struct mldv2_query) +
859 (nsrc * sizeof(struct in6_addr)))
860 return (EMSGSIZE);
861
862 /*
863 * Do further input validation upfront to avoid resetting timers
864 * should we need to discard this query.
865 */
866 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
867 /*
868 * A general query with a source list has undefined
869 * behaviour; discard it.
870 */
871 if (nsrc > 0)
872 return (EINVAL);
873 is_general_query = 1;
874 } else {
875 /*
876 * Embed scope ID of receiving interface in MLD query for
877 * lookup whilst we don't hold other locks (due to KAME
878 * locking lameness). We own this mbuf chain just now.
879 */
880 in6_setscope(&mld->mld_addr, ifp, NULL);
881 }
882
883 IN6_MULTI_LOCK();
884 MLD_LOCK();
885
886 mli = MLD_IFINFO(ifp);
887 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
888
889 /*
890 * Discard the v2 query if we're in Compatibility Mode.
891 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
892 * until the Old Version Querier Present timer expires.
893 */
894 if (mli->mli_version != MLD_VERSION_2)
895 goto out_locked;
896
897 mld_set_version(mli, MLD_VERSION_2);
898 mli->mli_rv = qrv;
899 mli->mli_qi = qqi;
900 mli->mli_qri = maxdelay;
901
902 CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
903 maxdelay);
904
905 if (is_general_query) {
906 /*
907 * MLDv2 General Query.
908 *
909 * Schedule a current-state report on this ifp for
910 * all groups, possibly containing source lists.
911 *
912 * If there is a pending General Query response
913 * scheduled earlier than the selected delay, do
914 * not schedule any other reports.
915 * Otherwise, reset the interface timer.
916 */
917 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
918 ifp, if_name(ifp));
919 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
920 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
921 V_interface_timers_running6 = 1;
922 }
923 } else {
924 /*
925 * MLDv2 Group-specific or Group-and-source-specific Query.
926 *
927 * Group-source-specific queries are throttled on
928 * a per-group basis to defeat denial-of-service attempts.
929 * Queries for groups we are not a member of on this
930 * link are simply ignored.
931 */
932 IF_ADDR_RLOCK(ifp);
933 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
934 if (inm == NULL) {
935 IF_ADDR_RUNLOCK(ifp);
936 goto out_locked;
937 }
938 if (nsrc > 0) {
939 if (!ratecheck(&inm->in6m_lastgsrtv,
940 &V_mld_gsrdelay)) {
941 CTR1(KTR_MLD, "%s: GS query throttled.",
942 __func__);
943 IF_ADDR_RUNLOCK(ifp);
944 goto out_locked;
945 }
946 }
947 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
948 ifp, if_name(ifp));
949 /*
950 * If there is a pending General Query response
951 * scheduled sooner than the selected delay, no
952 * further report need be scheduled.
953 * Otherwise, prepare to respond to the
954 * group-specific or group-and-source query.
955 */
956 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
957 mld_v2_process_group_query(inm, mli, timer, m, mld, off);
958
959 /* XXX Clear embedded scope ID as userland won't expect it. */
960 in6_clearscope(&mld->mld_addr);
961 IF_ADDR_RUNLOCK(ifp);
962 }
963
964 out_locked:
965 MLD_UNLOCK();
966 IN6_MULTI_UNLOCK();
967
968 return (0);
969 }
970
971 /*
972 * Process a received MLDv2 group-specific or group-and-source-specific
973 * query.
974 * Return <0 if any error occurred. Currently this is ignored.
975 */
976 static int
977 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
978 int timer, struct mbuf *m0, struct mldv2_query *mld, const int off)
979 {
980 int retval;
981 uint16_t nsrc;
982
983 IN6_MULTI_LOCK_ASSERT();
984 MLD_LOCK_ASSERT();
985
986 retval = 0;
987
988 switch (inm->in6m_state) {
989 case MLD_NOT_MEMBER:
990 case MLD_SILENT_MEMBER:
991 case MLD_SLEEPING_MEMBER:
992 case MLD_LAZY_MEMBER:
993 case MLD_AWAKENING_MEMBER:
994 case MLD_IDLE_MEMBER:
995 case MLD_LEAVING_MEMBER:
996 return (retval);
997 break;
998 case MLD_REPORTING_MEMBER:
999 case MLD_G_QUERY_PENDING_MEMBER:
1000 case MLD_SG_QUERY_PENDING_MEMBER:
1001 break;
1002 }
1003
1004 nsrc = ntohs(mld->mld_numsrc);
1005
1006 /* Length should be checked by calling function. */
1007 KASSERT((m0->m_flags & M_PKTHDR) == 0 ||
1008 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) +
1009 nsrc * sizeof(struct in6_addr),
1010 ("mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)",
1011 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) +
1012 nsrc * sizeof(struct in6_addr), m0));
1013 
1015 /*
1016 * Deal with group-specific queries upfront.
1017 * If any group query is already pending, purge any recorded
1018 * source-list state if it exists, and schedule a query response
1019 * for this group-specific query.
1020 */
1021 if (nsrc == 0) {
1022 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1023 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1024 in6m_clear_recorded(inm);
1025 timer = min(inm->in6m_timer, timer);
1026 }
1027 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1028 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1029 V_current_state_timers_running6 = 1;
1030 return (retval);
1031 }
1032
1033 /*
1034 * Deal with the case where a group-and-source-specific query has
1035 * been received but a group-specific query is already pending.
1036 */
1037 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1038 timer = min(inm->in6m_timer, timer);
1039 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1040 V_current_state_timers_running6 = 1;
1041 return (retval);
1042 }
1043
1044 /*
1045 * Finally, deal with the case where a group-and-source-specific
1046 * query has been received, where a response to a previous g-s-r
1047 * query exists, or none exists.
1048 * In this case, we need to parse the source-list which the Querier
1049 * has provided us with and check if we have any source list filter
1050 * entries at T1 for these sources. If we do not, there is no need
1051 	 * to schedule a report and the query may be dropped.
1052 * If we do, we must record them and schedule a current-state
1053 * report for those sources.
1054 */
1055 if (inm->in6m_nsrc > 0) {
1056 struct in6_addr srcaddr;
1057 int i, nrecorded;
1058 int soff;
1059
1060 soff = off + sizeof(struct mldv2_query);
1061 nrecorded = 0;
1062 for (i = 0; i < nsrc; i++) {
1063 m_copydata(m0, soff, sizeof(struct in6_addr),
1064 (caddr_t)&srcaddr);
1065 retval = in6m_record_source(inm, &srcaddr);
1066 if (retval < 0)
1067 break;
1068 nrecorded += retval;
1069 soff += sizeof(struct in6_addr);
1070 }
1071 if (nrecorded > 0) {
1072 CTR1(KTR_MLD,
1073 "%s: schedule response to SG query", __func__);
1074 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1075 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1076 V_current_state_timers_running6 = 1;
1077 }
1078 }
1079
1080 return (retval);
1081 }
1082
1083 /*
1084 * Process a received MLDv1 host membership report.
1085 * Assumes mld points to mld_hdr in pulled up mbuf chain.
1086 *
1087 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1088 * mld_addr. This is OK as we own the mbuf chain.
1089 */
1090 static int
1091 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1092 /*const*/ struct mld_hdr *mld)
1093 {
1094 struct in6_addr src, dst;
1095 struct in6_ifaddr *ia;
1096 struct in6_multi *inm;
1097 #ifdef KTR
1098 char ip6tbuf[INET6_ADDRSTRLEN];
1099 #endif
1100
1101 if (!mld_v1enable) {
1102 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
1103 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1104 ifp, if_name(ifp));
1105 return (0);
1106 }
1107
1108 if (ifp->if_flags & IFF_LOOPBACK)
1109 return (0);
1110
1111 /*
1112 * MLDv1 reports must originate from a host's link-local address,
1113 * or the unspecified address (when booting).
1114 */
1115 src = ip6->ip6_src;
1116 in6_clearscope(&src);
1117 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1118 		CTR3(KTR_MLD, "ignore v1 report src %s on ifp %p(%s)",
1119 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
1120 ifp, if_name(ifp));
1121 return (EINVAL);
1122 }
1123
1124 /*
1125 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1126 * group, and must be directed to the group itself.
1127 */
1128 dst = ip6->ip6_dst;
1129 in6_clearscope(&dst);
1130 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1131 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1132 		CTR3(KTR_MLD, "ignore v1 report dst %s on ifp %p(%s)",
1133 ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
1134 ifp, if_name(ifp));
1135 return (EINVAL);
1136 }
1137
1138 /*
1139 * Make sure we don't hear our own membership report, as fast
1140 * leave requires knowing that we are the only member of a
1141 * group. Assume we used the link-local address if available,
1142 * otherwise look for ::.
1143 *
1144 * XXX Note that scope ID comparison is needed for the address
1145 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1146 * performed for the on-wire address.
1147 */
1148 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1149 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
1150 (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
1151 if (ia != NULL)
1152 ifa_free(&ia->ia_ifa);
1153 return (0);
1154 }
1155 if (ia != NULL)
1156 ifa_free(&ia->ia_ifa);
1157
1158 CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
1159 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1160
1161 /*
1162 * Embed scope ID of receiving interface in MLD query for lookup
1163 * whilst we don't hold other locks (due to KAME locking lameness).
1164 */
1165 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1166 in6_setscope(&mld->mld_addr, ifp, NULL);
1167
1168 IN6_MULTI_LOCK();
1169 MLD_LOCK();
1170 IF_ADDR_RLOCK(ifp);
1171
1172 /*
1173 * MLDv1 report suppression.
1174 * If we are a member of this group, and our membership should be
1175 * reported, and our group timer is pending or about to be reset,
1176 * stop our group timer by transitioning to the 'lazy' state.
1177 */
1178 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
1179 if (inm != NULL) {
1180 struct mld_ifsoftc *mli;
1181
1182 mli = inm->in6m_mli;
1183 KASSERT(mli != NULL,
1184 ("%s: no mli for ifp %p", __func__, ifp));
1185
1186 /*
1187 * If we are in MLDv2 host mode, do not allow the
1188 * other host's MLDv1 report to suppress our reports.
1189 */
1190 if (mli->mli_version == MLD_VERSION_2)
1191 goto out_locked;
1192
1193 inm->in6m_timer = 0;
1194
1195 switch (inm->in6m_state) {
1196 case MLD_NOT_MEMBER:
1197 case MLD_SILENT_MEMBER:
1198 case MLD_SLEEPING_MEMBER:
1199 break;
1200 case MLD_REPORTING_MEMBER:
1201 case MLD_IDLE_MEMBER:
1202 case MLD_AWAKENING_MEMBER:
1203 CTR3(KTR_MLD,
1204 "report suppressed for %s on ifp %p(%s)",
1205 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1206 ifp, if_name(ifp));
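			/* FALLTHROUGH */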
1207 case MLD_LAZY_MEMBER:
1208 inm->in6m_state = MLD_LAZY_MEMBER;
1209 break;
1210 case MLD_G_QUERY_PENDING_MEMBER:
1211 case MLD_SG_QUERY_PENDING_MEMBER:
1212 case MLD_LEAVING_MEMBER:
1213 break;
1214 }
1215 }
1216
1217 out_locked:
1218 IF_ADDR_RUNLOCK(ifp);
1219 MLD_UNLOCK();
1220 IN6_MULTI_UNLOCK();
1221
1222 /* XXX Clear embedded scope ID as userland won't expect it. */
1223 in6_clearscope(&mld->mld_addr);
1224
1225 return (0);
1226 }
1227
1228 /*
1229 * MLD input path.
1230 *
1231 * Assume query messages which fit in a single ICMPv6 message header
1232 * have been pulled up.
1233 * Assume that userland will want to see the message, even if it
1234 * otherwise fails kernel input validation; do not free it.
1235 * Pullup may however free the mbuf chain m if it fails.
1236 *
1237 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1238 */
1239 int
1240 mld_input(struct mbuf *m, int off, int icmp6len)
1241 {
1242 struct ifnet *ifp;
1243 struct ip6_hdr *ip6;
1244 struct mld_hdr *mld;
1245 int mldlen;
1246
1247 CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1248
1249 ifp = m->m_pkthdr.rcvif;
1250
1251 ip6 = mtod(m, struct ip6_hdr *);
1252
1253 /* Pullup to appropriate size. */
1254 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1255 if (mld->mld_type == MLD_LISTENER_QUERY &&
1256 icmp6len >= sizeof(struct mldv2_query)) {
1257 mldlen = sizeof(struct mldv2_query);
1258 } else {
1259 mldlen = sizeof(struct mld_hdr);
1260 }
1261 IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1262 if (mld == NULL) {
1263 ICMP6STAT_INC(icp6s_badlen);
1264 return (IPPROTO_DONE);
1265 }
1266
1267 /*
1268 * Userland needs to see all of this traffic for implementing
1269 * the endpoint discovery portion of multicast routing.
1270 */
1271 switch (mld->mld_type) {
1272 case MLD_LISTENER_QUERY:
1273 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1274 if (icmp6len == sizeof(struct mld_hdr)) {
1275 if (mld_v1_input_query(ifp, ip6, mld) != 0)
1276 return (0);
1277 } else if (icmp6len >= sizeof(struct mldv2_query)) {
1278 if (mld_v2_input_query(ifp, ip6, m,
1279 (struct mldv2_query *)mld, off, icmp6len) != 0)
1280 return (0);
1281 }
1282 break;
1283 case MLD_LISTENER_REPORT:
1284 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1285 if (mld_v1_input_report(ifp, ip6, mld) != 0)
1286 return (0);
1287 break;
1288 case MLDV2_LISTENER_REPORT:
1289 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1290 break;
1291 case MLD_LISTENER_DONE:
1292 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1293 break;
1294 default:
1295 break;
1296 }
1297
1298 return (0);
1299 }
1300
1301 /*
1302 * Fast timeout handler (global).
1303 * VIMAGE: Timeout handlers are expected to service all vimages.
1304 */
1305 void
1306 mld_fasttimo(void)
1307 {
1308 VNET_ITERATOR_DECL(vnet_iter);
1309
1310 VNET_LIST_RLOCK_NOSLEEP();
1311 VNET_FOREACH(vnet_iter) {
1312 CURVNET_SET(vnet_iter);
1313 mld_fasttimo_vnet();
1314 CURVNET_RESTORE();
1315 }
1316 VNET_LIST_RUNLOCK_NOSLEEP();
1317 }
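/*
 * Timing note (sketch): this handler is invoked from the protocol
 * fast-timeout path, so with the stock PR_FASTHZ == 5 it runs five
 * times per second; the report timers above count in these 200 ms
 * ticks.  mld_slowtimo() below runs at PR_SLOWHZ (twice per second).
 */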
1318
1319 /*
1320 * Fast timeout handler (per-vnet).
1321 *
1322 * VIMAGE: Assume caller has set up our curvnet.
1323 */
1324 static void
1325 mld_fasttimo_vnet(void)
1326 {
1327 struct mbufq scq; /* State-change packets */
1328 struct mbufq qrq; /* Query response packets */
1329 struct ifnet *ifp;
1330 struct mld_ifsoftc *mli;
1331 struct ifmultiaddr *ifma;
1332 struct in6_multi *inm, *tinm;
1333 int uri_fasthz;
1334
1335 uri_fasthz = 0;
1336
1337 /*
1338 * Quick check to see if any work needs to be done, in order to
1339 * minimize the overhead of fasttimo processing.
1340 * SMPng: XXX Unlocked reads.
1341 */
1342 if (!V_current_state_timers_running6 &&
1343 !V_interface_timers_running6 &&
1344 !V_state_change_timers_running6)
1345 return;
1346
1347 IN6_MULTI_LOCK();
1348 MLD_LOCK();
1349
1350 /*
1351 * MLDv2 General Query response timer processing.
1352 */
1353 if (V_interface_timers_running6) {
1354 CTR1(KTR_MLD, "%s: interface timers running", __func__);
1355
1356 V_interface_timers_running6 = 0;
1357 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1358 if (mli->mli_v2_timer == 0) {
1359 /* Do nothing. */
1360 } else if (--mli->mli_v2_timer == 0) {
1361 mld_v2_dispatch_general_query(mli);
1362 } else {
1363 V_interface_timers_running6 = 1;
1364 }
1365 }
1366 }
1367
1368 if (!V_current_state_timers_running6 &&
1369 !V_state_change_timers_running6)
1370 goto out_locked;
1371
1372 V_current_state_timers_running6 = 0;
1373 V_state_change_timers_running6 = 0;
1374
1375 CTR1(KTR_MLD, "%s: state change timers running", __func__);
1376
1377 /*
1378 * MLD host report and state-change timer processing.
1379 * Note: Processing a v2 group timer may remove a node.
1380 */
1381 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1382 ifp = mli->mli_ifp;
1383
1384 if (mli->mli_version == MLD_VERSION_2) {
1385 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1386 PR_FASTHZ);
1387 mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
1388 mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
1389 }
1390
1391 IF_ADDR_RLOCK(ifp);
1392 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1393 if (ifma->ifma_addr->sa_family != AF_INET6 ||
1394 ifma->ifma_protospec == NULL)
1395 continue;
1396 inm = (struct in6_multi *)ifma->ifma_protospec;
1397 switch (mli->mli_version) {
1398 case MLD_VERSION_1:
1399 mld_v1_process_group_timer(mli, inm);
1400 break;
1401 case MLD_VERSION_2:
1402 mld_v2_process_group_timers(mli, &qrq,
1403 &scq, inm, uri_fasthz);
1404 break;
1405 }
1406 }
1407 IF_ADDR_RUNLOCK(ifp);
1408
1409 switch (mli->mli_version) {
1410 case MLD_VERSION_1:
1411 /*
1412 			 * Transmit reports for this lifecycle.  This
1413 			 * is done while not holding IF_ADDR_LOCK,
1414 			 * since mld_v1_transmit_report() can call
1415 			 * in6ifa_ifpforlinklocal(), which takes
1416 			 * IF_ADDR_LOCK internally, as well as
1417 			 * ip6_output() to transmit a packet.
1418 */
1419 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
1420 in6m_nrele, tinm) {
1421 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
1422 in6m_nrele);
1423 (void)mld_v1_transmit_report(inm,
1424 MLD_LISTENER_REPORT);
1425 }
1426 break;
1427 case MLD_VERSION_2:
1428 mld_dispatch_queue(&qrq, 0);
1429 mld_dispatch_queue(&scq, 0);
1430
1431 /*
1432 * Free the in_multi reference(s) for
1433 * this lifecycle.
1434 */
1435 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
1436 in6m_nrele, tinm) {
1437 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
1438 in6m_nrele);
1439 in6m_release_locked(inm);
1440 }
1441 break;
1442 }
1443 }
1444
1445 out_locked:
1446 MLD_UNLOCK();
1447 IN6_MULTI_UNLOCK();
1448 }
1449
1450 /*
1451 * Update host report group timer.
1452 * Will update the global pending timer flags.
1453 */
1454 static void
1455 mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
1456 {
1457 int report_timer_expired;
1458
1459 IN6_MULTI_LOCK_ASSERT();
1460 MLD_LOCK_ASSERT();
1461
1462 if (inm->in6m_timer == 0) {
1463 report_timer_expired = 0;
1464 } else if (--inm->in6m_timer == 0) {
1465 report_timer_expired = 1;
1466 } else {
1467 V_current_state_timers_running6 = 1;
1468 return;
1469 }
1470
1471 switch (inm->in6m_state) {
1472 case MLD_NOT_MEMBER:
1473 case MLD_SILENT_MEMBER:
1474 case MLD_IDLE_MEMBER:
1475 case MLD_LAZY_MEMBER:
1476 case MLD_SLEEPING_MEMBER:
1477 case MLD_AWAKENING_MEMBER:
1478 break;
1479 case MLD_REPORTING_MEMBER:
1480 if (report_timer_expired) {
1481 inm->in6m_state = MLD_IDLE_MEMBER;
1482 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
1483 in6m_nrele);
1484 }
1485 break;
1486 case MLD_G_QUERY_PENDING_MEMBER:
1487 case MLD_SG_QUERY_PENDING_MEMBER:
1488 case MLD_LEAVING_MEMBER:
1489 break;
1490 }
1491 }
1492
1493 /*
1494 * Update a group's timers for MLDv2.
1495 * Will update the global pending timer flags.
1496 * Note: Unlocked read from mli.
1497 */
1498 static void
1499 mld_v2_process_group_timers(struct mld_ifsoftc *mli,
1500 struct mbufq *qrq, struct mbufq *scq,
1501 struct in6_multi *inm, const int uri_fasthz)
1502 {
1503 int query_response_timer_expired;
1504 int state_change_retransmit_timer_expired;
1505 #ifdef KTR
1506 char ip6tbuf[INET6_ADDRSTRLEN];
1507 #endif
1508
1509 IN6_MULTI_LOCK_ASSERT();
1510 MLD_LOCK_ASSERT();
1511
1512 query_response_timer_expired = 0;
1513 state_change_retransmit_timer_expired = 0;
1514
1515 /*
1516 * During a transition from compatibility mode back to MLDv2,
1517 * a group record in REPORTING state may still have its group
1518 * timer active. This is a no-op in this function; it is easier
1519 * to deal with it here than to complicate the slow-timeout path.
1520 */
1521 if (inm->in6m_timer == 0) {
1522 query_response_timer_expired = 0;
1523 } else if (--inm->in6m_timer == 0) {
1524 query_response_timer_expired = 1;
1525 } else {
1526 V_current_state_timers_running6 = 1;
1527 }
1528
1529 if (inm->in6m_sctimer == 0) {
1530 state_change_retransmit_timer_expired = 0;
1531 } else if (--inm->in6m_sctimer == 0) {
1532 state_change_retransmit_timer_expired = 1;
1533 } else {
1534 V_state_change_timers_running6 = 1;
1535 }
1536
1537 /* We are in fasttimo, so be quick about it. */
1538 if (!state_change_retransmit_timer_expired &&
1539 !query_response_timer_expired)
1540 return;
1541
1542 switch (inm->in6m_state) {
1543 case MLD_NOT_MEMBER:
1544 case MLD_SILENT_MEMBER:
1545 case MLD_SLEEPING_MEMBER:
1546 case MLD_LAZY_MEMBER:
1547 case MLD_AWAKENING_MEMBER:
1548 case MLD_IDLE_MEMBER:
1549 break;
1550 case MLD_G_QUERY_PENDING_MEMBER:
1551 case MLD_SG_QUERY_PENDING_MEMBER:
1552 /*
1553 * Respond to a previously pending Group-Specific
1554 * or Group-and-Source-Specific query by enqueueing
1555 * the appropriate Current-State report for
1556 * immediate transmission.
1557 */
1558 if (query_response_timer_expired) {
1559 int retval;
1560
1561 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1562 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
1563 0);
1564 CTR2(KTR_MLD, "%s: enqueue record = %d",
1565 __func__, retval);
1566 inm->in6m_state = MLD_REPORTING_MEMBER;
1567 in6m_clear_recorded(inm);
1568 }
1569 /* FALLTHROUGH */
1570 case MLD_REPORTING_MEMBER:
1571 case MLD_LEAVING_MEMBER:
1572 if (state_change_retransmit_timer_expired) {
1573 /*
1574 * State-change retransmission timer fired.
1575 * If there are any further pending retransmissions,
1576 * set the global pending state-change flag, and
1577 * reset the timer.
1578 */
1579 if (--inm->in6m_scrv > 0) {
1580 inm->in6m_sctimer = uri_fasthz;
1581 V_state_change_timers_running6 = 1;
1582 }
1583 /*
1584 * Retransmit the previously computed state-change
1585 * report. If there are no further pending
1586 * retransmissions, the mbuf queue will be consumed.
1587 * Update T0 state to T1 as we have now sent
1588 * a state-change.
1589 */
1590 (void)mld_v2_merge_state_changes(inm, scq);
1591
1592 in6m_commit(inm);
1593 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
1594 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1595 if_name(inm->in6m_ifp));
1596
1597 /*
1598 * If we are leaving the group for good, make sure
1599 * we release MLD's reference to it.
1600 * This release must be deferred using a SLIST,
1601 * as we are called from a loop which traverses
1602 * the in_ifmultiaddr TAILQ.
1603 */
1604 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1605 inm->in6m_scrv == 0) {
1606 inm->in6m_state = MLD_NOT_MEMBER;
1607 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
1608 inm, in6m_nrele);
1609 }
1610 }
1611 break;
1612 }
1613 }
1614
1615 /*
1616 * Switch to a different version on the given interface,
1617 * as per Section 9.12.
1618 */
1619 static void
1620 mld_set_version(struct mld_ifsoftc *mli, const int version)
1621 {
1622 int old_version_timer;
1623
1624 MLD_LOCK_ASSERT();
1625
1626 CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
1627 version, mli->mli_ifp, if_name(mli->mli_ifp));
1628
1629 if (version == MLD_VERSION_1) {
1630 /*
1631 * Compute the "Older Version Querier Present" timer as per
1632 * Section 9.12.
1633 */
1634 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1635 old_version_timer *= PR_SLOWHZ;
1636 mli->mli_v1_timer = old_version_timer;
1637 }
1638
1639 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1640 mli->mli_version = MLD_VERSION_1;
1641 mld_v2_cancel_link_timers(mli);
1642 }
1643 }
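/*
 * Worked example (sketch), assuming the defaults set at attach time
 * (MLD_RV_INIT == 2, MLD_QI_INIT == 125 s, MLD_QRI_INIT == 10 s):
 * old_version_timer = (2 * 125) + 10 == 260 seconds, i.e. 520 ticks
 * at PR_SLOWHZ == 2, so the link stays in MLDv1 compatibility mode
 * for 260 seconds after the last MLDv1 query is heard.
 */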
1644
1645 /*
1646 * Cancel pending MLDv2 timers for the given link and all groups
1647 * joined on it; state-change, general-query, and group-query timers.
1648 */
1649 static void
1650 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
1651 {
1652 struct ifmultiaddr *ifma;
1653 struct ifnet *ifp;
1654 struct in6_multi *inm, *tinm;
1655
1656 CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
1657 mli->mli_ifp, if_name(mli->mli_ifp));
1658
1659 IN6_MULTI_LOCK_ASSERT();
1660 MLD_LOCK_ASSERT();
1661
1662 /*
1663 * Fast-track this potentially expensive operation
1664 * by checking all the global 'timer pending' flags.
1665 */
1666 if (!V_interface_timers_running6 &&
1667 !V_state_change_timers_running6 &&
1668 !V_current_state_timers_running6)
1669 return;
1670
1671 mli->mli_v2_timer = 0;
1672
1673 ifp = mli->mli_ifp;
1674
1675 IF_ADDR_RLOCK(ifp);
1676 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1677 		if (ifma->ifma_addr->sa_family != AF_INET6 ||
 		    ifma->ifma_protospec == NULL)
1678 			continue;
1679 inm = (struct in6_multi *)ifma->ifma_protospec;
1680 switch (inm->in6m_state) {
1681 case MLD_NOT_MEMBER:
1682 case MLD_SILENT_MEMBER:
1683 case MLD_IDLE_MEMBER:
1684 case MLD_LAZY_MEMBER:
1685 case MLD_SLEEPING_MEMBER:
1686 case MLD_AWAKENING_MEMBER:
1687 break;
1688 case MLD_LEAVING_MEMBER:
1689 /*
1690 * If we are leaving the group and switching
1691 * version, we need to release the final
1692 * reference held for issuing the INCLUDE {}.
1693 */
1694 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
1695 in6m_nrele);
1696 /* FALLTHROUGH */
1697 case MLD_G_QUERY_PENDING_MEMBER:
1698 case MLD_SG_QUERY_PENDING_MEMBER:
1699 in6m_clear_recorded(inm);
1700 /* FALLTHROUGH */
1701 case MLD_REPORTING_MEMBER:
1702 inm->in6m_sctimer = 0;
1703 inm->in6m_timer = 0;
1704 inm->in6m_state = MLD_REPORTING_MEMBER;
1705 /*
1706 * Free any pending MLDv2 state-change records.
1707 */
1708 mbufq_drain(&inm->in6m_scq);
1709 break;
1710 }
1711 }
1712 IF_ADDR_RUNLOCK(ifp);
1713 SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
1714 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1715 in6m_release_locked(inm);
1716 }
1717 }
1718
1719 /*
1720 * Global slowtimo handler.
1721 * VIMAGE: Timeout handlers are expected to service all vimages.
1722 */
1723 void
1724 mld_slowtimo(void)
1725 {
1726 VNET_ITERATOR_DECL(vnet_iter);
1727
1728 VNET_LIST_RLOCK_NOSLEEP();
1729 VNET_FOREACH(vnet_iter) {
1730 CURVNET_SET(vnet_iter);
1731 mld_slowtimo_vnet();
1732 CURVNET_RESTORE();
1733 }
1734 VNET_LIST_RUNLOCK_NOSLEEP();
1735 }
1736
1737 /*
1738 * Per-vnet slowtimo handler.
1739 */
1740 static void
1741 mld_slowtimo_vnet(void)
1742 {
1743 struct mld_ifsoftc *mli;
1744
1745 MLD_LOCK();
1746
1747 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1748 mld_v1_process_querier_timers(mli);
1749 }
1750
1751 MLD_UNLOCK();
1752 }
1753
1754 /*
1755 * Update the Older Version Querier Present timers for a link.
1756 * See Section 9.12 of RFC 3810.
1757 */
1758 static void
1759 mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
1760 {
1761
1762 MLD_LOCK_ASSERT();
1763
1764 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1765 /*
1766 * MLDv1 Querier Present timer expired; revert to MLDv2.
1767 */
1768 CTR5(KTR_MLD,
1769 "%s: transition from v%d -> v%d on %p(%s)",
1770 __func__, mli->mli_version, MLD_VERSION_2,
1771 mli->mli_ifp, if_name(mli->mli_ifp));
1772 mli->mli_version = MLD_VERSION_2;
1773 }
1774 }
1775
1776 /*
1777 * Transmit an MLDv1 report immediately.
1778 */
1779 static int
1780 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1781 {
1782 struct ifnet *ifp;
1783 struct in6_ifaddr *ia;
1784 struct ip6_hdr *ip6;
1785 struct mbuf *mh, *md;
1786 struct mld_hdr *mld;
1787
1788 IN6_MULTI_LOCK_ASSERT();
1789 MLD_LOCK_ASSERT();
1790
1791 ifp = in6m->in6m_ifp;
1792 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1793 /* ia may be NULL if link-local address is tentative. */
1794
1795 mh = m_gethdr(M_NOWAIT, MT_DATA);
1796 if (mh == NULL) {
1797 if (ia != NULL)
1798 ifa_free(&ia->ia_ifa);
1799 return (ENOMEM);
1800 }
1801 md = m_get(M_NOWAIT, MT_DATA);
1802 if (md == NULL) {
1803 m_free(mh);
1804 if (ia != NULL)
1805 ifa_free(&ia->ia_ifa);
1806 return (ENOMEM);
1807 }
1808 mh->m_next = md;
1809
1810 /*
1811 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1812 * that ether_output() does not need to allocate another mbuf
1813 * for the header in the most common case.
1814 */
1815 M_ALIGN(mh, sizeof(struct ip6_hdr));
1816 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1817 mh->m_len = sizeof(struct ip6_hdr);
1818
1819 ip6 = mtod(mh, struct ip6_hdr *);
1820 ip6->ip6_flow = 0;
1821 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1822 ip6->ip6_vfc |= IPV6_VERSION;
1823 ip6->ip6_nxt = IPPROTO_ICMPV6;
1824 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1825 ip6->ip6_dst = in6m->in6m_addr;
1826
1827 md->m_len = sizeof(struct mld_hdr);
1828 mld = mtod(md, struct mld_hdr *);
1829 mld->mld_type = type;
1830 mld->mld_code = 0;
1831 mld->mld_cksum = 0;
1832 mld->mld_maxdelay = 0;
1833 mld->mld_reserved = 0;
1834 mld->mld_addr = in6m->in6m_addr;
1835 in6_clearscope(&mld->mld_addr);
1836 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1837 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1838
1839 mld_save_context(mh, ifp);
1840 mh->m_flags |= M_MLDV1;
1841
1842 mld_dispatch_packet(mh);
1843
1844 if (ia != NULL)
1845 ifa_free(&ia->ia_ifa);
1846 return (0);
1847 }
1848
1849 /*
1850 * Process a state change from the upper layer for the given IPv6 group.
1851 *
1852 * Each socket holds a reference on the in_multi in its own ip_moptions.
1853  * The socket layer will have made the necessary updates to the group
1854 * state, it is now up to MLD to issue a state change report if there
1855 * has been any change between T0 (when the last state-change was issued)
1856 * and T1 (now).
1857 *
1858  * We use the MLDv2 state machine at group level. The MLD module
1859 * however makes the decision as to which MLD protocol version to speak.
1860 * A state change *from* INCLUDE {} always means an initial join.
1861 * A state change *to* INCLUDE {} always means a final leave.
1862 *
1863 * If delay is non-zero, and the state change is an initial multicast
1864 * join, the state change report will be delayed by 'delay' ticks
1865 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
1866 * the initial MLDv2 state change report will be delayed by whichever
1867 * is sooner, a pending state-change timer or delay itself.
1868 *
1869 * VIMAGE: curvnet should have been set by caller, as this routine
1870 * is called from the socket option handlers.
1871 */
1872 int
1873 mld_change_state(struct in6_multi *inm, const int delay)
1874 {
1875 struct mld_ifsoftc *mli;
1876 struct ifnet *ifp;
1877 int error;
1878
1879 IN6_MULTI_LOCK_ASSERT();
1880
1881 error = 0;
1882
1883 /*
1884 * Try to detect if the upper layer just asked us to change state
1885 * for an interface which has now gone away.
1886 */
1887 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
1888 ifp = inm->in6m_ifma->ifma_ifp;
1889 if (ifp != NULL) {
1890 /*
1891 * Sanity check that netinet6's notion of ifp is the
1892 * same as net's.
1893 */
1894 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
1895 }
1896
1897 MLD_LOCK();
1898
1899 mli = MLD_IFINFO(ifp);
1900 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1901
1902 /*
1903 * If we detect a state transition to or from MCAST_UNDEFINED
1904 * for this group, then we are starting or finishing an MLD
1905 * life cycle for this group.
1906 */
1907 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
1908 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
1909 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
1910 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
1911 CTR1(KTR_MLD, "%s: initial join", __func__);
1912 error = mld_initial_join(inm, mli, delay);
1913 goto out_locked;
1914 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
1915 CTR1(KTR_MLD, "%s: final leave", __func__);
1916 mld_final_leave(inm, mli);
1917 goto out_locked;
1918 }
1919 } else {
1920 CTR1(KTR_MLD, "%s: filter set change", __func__);
1921 }
1922
1923 error = mld_handle_state_change(inm, mli);
1924
1925 out_locked:
1926 MLD_UNLOCK();
1927 return (error);
1928 }
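/*
 * Editor's sketch (not part of mld6.c): the T0/T1 comparison above,
 * reduced to a classification table.  MCAST_UNDEFINED is FreeBSD's
 * filter-mode constant from <netinet/in.h>; everything else here is
 * local to this sketch.
 */
#include <netinet/in.h>

#ifndef MCAST_UNDEFINED
#define MCAST_UNDEFINED	0		/* FreeBSD's value */
#endif

enum mld_transition_sketch {
	SK_INITIAL_JOIN,	/* T0 undefined: mld_initial_join() */
	SK_FINAL_LEAVE,		/* T1 undefined: mld_final_leave() */
	SK_STATE_CHANGE		/* otherwise: mld_handle_state_change() */
};

static enum mld_transition_sketch
classify_transition_sketch(int t0_fmode, int t1_fmode)
{
	if (t0_fmode != t1_fmode) {
		if (t0_fmode == MCAST_UNDEFINED)
			return (SK_INITIAL_JOIN);
		if (t1_fmode == MCAST_UNDEFINED)
			return (SK_FINAL_LEAVE);
	}
	/* A mode change IN<->EX, or a pure source-filter change. */
	return (SK_STATE_CHANGE);
}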
1929
1930 /*
1931 * Perform the initial join for an MLD group.
1932 *
1933 * When joining a group:
1934 * If the group should have its MLD traffic suppressed, do nothing.
1935 * MLDv1 starts sending MLDv1 host membership reports.
1936 * MLDv2 will schedule an MLDv2 state-change report containing the
1937 * initial state of the membership.
1938 *
1939 * If the delay argument is non-zero, then we must delay sending the
1940 * initial state change for delay ticks (in units of PR_FASTHZ).
1941 */
1942 static int
1943 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
1944 const int delay)
1945 {
1946 struct ifnet *ifp;
1947 struct mbufq *mq;
1948 int error, retval, syncstates;
1949 int odelay;
1950 #ifdef KTR
1951 char ip6tbuf[INET6_ADDRSTRLEN];
1952 #endif
1953
1954 CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
1955 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1956 inm->in6m_ifp, if_name(inm->in6m_ifp));
1957
1958 error = 0;
1959 syncstates = 1;
1960
1961 ifp = inm->in6m_ifp;
1962
1963 IN6_MULTI_LOCK_ASSERT();
1964 MLD_LOCK_ASSERT();
1965
1966 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
1967
1968 /*
1969 * Groups joined on loopback or marked as 'not reported'
1970 * enter the MLD_SILENT_MEMBER state and
1971 * are never reported in any protocol exchanges.
1972 * All other groups enter the appropriate state machine
1973 * for the version in use on this link.
1974 * A link marked as MLIF_SILENT causes MLD to be completely
1975 * disabled for the link.
1976 */
1977 if ((ifp->if_flags & IFF_LOOPBACK) ||
1978 (mli->mli_flags & MLIF_SILENT) ||
1979 !mld_is_addr_reported(&inm->in6m_addr)) {
1980 CTR1(KTR_MLD,
1981 "%s: not kicking state machine for silent group", __func__);
1982 inm->in6m_state = MLD_SILENT_MEMBER;
1983 inm->in6m_timer = 0;
1984 } else {
1985 /*
1986 * Deal with overlapping in_multi lifecycle.
1987 * If this group was LEAVING, then make sure
1988 * we drop the reference we picked up to keep the
1989 * group around for the final INCLUDE {} enqueue.
1990 */
1991 if (mli->mli_version == MLD_VERSION_2 &&
1992 inm->in6m_state == MLD_LEAVING_MEMBER)
1993 in6m_release_locked(inm);
1994
1995 inm->in6m_state = MLD_REPORTING_MEMBER;
1996
1997 switch (mli->mli_version) {
1998 case MLD_VERSION_1:
1999 /*
2000 * If a delay was provided, only use it if
2001 * it is greater than the delay normally
2002 * used for an MLDv1 state change report,
2003 * and delay sending the initial MLDv1 report
2004 * by not transitioning to the IDLE state.
2005 */
2006 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
2007 if (delay) {
2008 inm->in6m_timer = max(delay, odelay);
2009 V_current_state_timers_running6 = 1;
2010 } else {
2011 inm->in6m_state = MLD_IDLE_MEMBER;
2012 error = mld_v1_transmit_report(inm,
2013 MLD_LISTENER_REPORT);
2014 if (error == 0) {
2015 inm->in6m_timer = odelay;
2016 V_current_state_timers_running6 = 1;
2017 }
2018 }
2019 break;
2020
2021 case MLD_VERSION_2:
2022 /*
2023 * Defer update of T0 to T1, until the first copy
2024 * of the state change has been transmitted.
2025 */
2026 syncstates = 0;
2027
2028 /*
2029 * Immediately enqueue a State-Change Report for
2030 * this interface, freeing any previous reports.
2031 * Don't kick the timers if there is nothing to do,
2032 * or if an error occurred.
2033 */
2034 mq = &inm->in6m_scq;
2035 mbufq_drain(mq);
2036 retval = mld_v2_enqueue_group_record(mq, inm, 1,
2037 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2038 CTR2(KTR_MLD, "%s: enqueue record = %d",
2039 __func__, retval);
2040 if (retval <= 0) {
2041 error = -retval;
2042 break;
2043 }
2044
2045 /*
2046 * Schedule transmission of pending state-change
2047 * report up to RV times for this link. The timer
2048 * will fire at the next mld_fasttimo (~200ms),
2049 * giving us an opportunity to merge the reports.
2050 *
2051 * If a delay was provided to this function, only
2052 * use it if it is sooner than the existing one.
2053 */
2054 KASSERT(mli->mli_rv > 1,
2055 ("%s: invalid robustness %d", __func__,
2056 mli->mli_rv));
2057 inm->in6m_scrv = mli->mli_rv;
2058 if (delay) {
2059 if (inm->in6m_sctimer > 1) {
2060 inm->in6m_sctimer =
2061 min(inm->in6m_sctimer, delay);
2062 } else
2063 inm->in6m_sctimer = delay;
2064 } else
2065 inm->in6m_sctimer = 1;
2066 V_state_change_timers_running6 = 1;
2067
2068 error = 0;
2069 break;
2070 }
2071 }
2072
2073 /*
2074 * Only update the T0 state if state change is atomic,
2075 * i.e. we don't need to wait for a timer to fire before we
2076 * can consider the state change to have been communicated.
2077 */
2078 if (syncstates) {
2079 in6m_commit(inm);
2080 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2081 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2082 if_name(inm->in6m_ifp));
2083 }
2084
2085 return (error);
2086 }
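/*
 * Editor's sketch (not part of mld6.c): the MLDv1 branch above picks
 * its report timer in fast-timeout ticks.  Assuming the stock values
 * (PR_FASTHZ == 5 ticks per second, MLD_V1_MAX_RI == 10 seconds) and
 * MLD_RANDOM_DELAY(X) uniform in [1, X], the unsolicited report lands
 * at most ~10 seconds out; all names below are local to this sketch.
 */
#include <stdlib.h>

#define SK_PR_FASTHZ		5	/* assumed fast timeouts per second */
#define SK_MLD_V1_MAX_RI	10	/* assumed max response interval, s */

static int
mldv1_report_timer_sketch(int delay)
{
	/* odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ): [1, 50] */
	int odelay = rand() % (SK_MLD_V1_MAX_RI * SK_PR_FASTHZ) + 1;

	/* A caller-supplied delay only wins if it is the longer wait. */
	return (delay ? (delay > odelay ? delay : odelay) : odelay);
}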
2087
2088 /*
2089 * Issue an intermediate state change during the life-cycle.
2090 */
2091 static int
2092 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
2093 {
2094 struct ifnet *ifp;
2095 int retval;
2096 #ifdef KTR
2097 char ip6tbuf[INET6_ADDRSTRLEN];
2098 #endif
2099
2100 CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
2101 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2102 inm->in6m_ifp, if_name(inm->in6m_ifp));
2103
2104 ifp = inm->in6m_ifp;
2105
2106 IN6_MULTI_LOCK_ASSERT();
2107 MLD_LOCK_ASSERT();
2108
2109 KASSERT(mli && mli->mli_ifp == ifp,
2110 ("%s: inconsistent ifp", __func__));
2111
2112 if ((ifp->if_flags & IFF_LOOPBACK) ||
2113 (mli->mli_flags & MLIF_SILENT) ||
2114 !mld_is_addr_reported(&inm->in6m_addr) ||
2115 (mli->mli_version != MLD_VERSION_2)) {
2116 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2117 CTR1(KTR_MLD,
2118 "%s: not kicking state machine for silent group", __func__);
2119 }
2120 CTR1(KTR_MLD, "%s: nothing to do", __func__);
2121 in6m_commit(inm);
2122 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2123 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2124 if_name(inm->in6m_ifp));
2125 return (0);
2126 }
2127
2128 mbufq_drain(&inm->in6m_scq);
2129
2130 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2131 (mli->mli_flags & MLIF_USEALLOW));
2132 CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
2133 if (retval <= 0)
2134 return (-retval);
2135
2136 /*
2137 * If record(s) were enqueued, start the state-change
2138 * report timer for this group.
2139 */
2140 inm->in6m_scrv = mli->mli_rv;
2141 inm->in6m_sctimer = 1;
2142 V_state_change_timers_running6 = 1;
2143
2144 return (0);
2145 }
2146
2147 /*
2148 * Perform the final leave for a multicast address.
2149 *
2150 * When leaving a group:
2151 * MLDv1 sends a DONE message, if and only if we are the reporter.
2152 * MLDv2 enqueues a state-change report containing a transition
2153 * to INCLUDE {} for immediate transmission.
2154 */
2155 static void
2156 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
2157 {
2158 int syncstates;
2159 #ifdef KTR
2160 char ip6tbuf[INET6_ADDRSTRLEN];
2161 #endif
2162
2163 syncstates = 1;
2164
2165 CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
2166 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2167 inm->in6m_ifp, if_name(inm->in6m_ifp));
2168
2169 IN6_MULTI_LOCK_ASSERT();
2170 MLD_LOCK_ASSERT();
2171
2172 switch (inm->in6m_state) {
2173 case MLD_NOT_MEMBER:
2174 case MLD_SILENT_MEMBER:
2175 case MLD_LEAVING_MEMBER:
2176 /* Already leaving or left; do nothing. */
2177 CTR1(KTR_MLD,
2178 "%s: not kicking state machine for silent group", __func__);
2179 break;
2180 case MLD_REPORTING_MEMBER:
2181 case MLD_IDLE_MEMBER:
2182 case MLD_G_QUERY_PENDING_MEMBER:
2183 case MLD_SG_QUERY_PENDING_MEMBER:
2184 if (mli->mli_version == MLD_VERSION_1) {
2185 #ifdef INVARIANTS
2186 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2187 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
2188 panic("%s: MLDv2 state reached, not MLDv2 mode",
2189 __func__);
2190 #endif
2191 mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2192 inm->in6m_state = MLD_NOT_MEMBER;
2193 V_current_state_timers_running6 = 1;
2194 } else if (mli->mli_version == MLD_VERSION_2) {
2195 /*
2196 * Stop group timer and all pending reports.
2197 * Immediately enqueue a state-change report
2198 * TO_IN {} to be sent on the next fast timeout,
2199 * giving us an opportunity to merge reports.
2200 */
2201 mbufq_drain(&inm->in6m_scq);
2202 inm->in6m_timer = 0;
2203 inm->in6m_scrv = mli->mli_rv;
2204 CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
2205 "pending retransmissions.", __func__,
2206 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2207 if_name(inm->in6m_ifp), inm->in6m_scrv);
2208 if (inm->in6m_scrv == 0) {
2209 inm->in6m_state = MLD_NOT_MEMBER;
2210 inm->in6m_sctimer = 0;
2211 } else {
2212 int retval;
2213
2214 in6m_acquire_locked(inm);
2215
2216 retval = mld_v2_enqueue_group_record(
2217 &inm->in6m_scq, inm, 1, 0, 0,
2218 (mli->mli_flags & MLIF_USEALLOW));
2219 KASSERT(retval != 0,
2220 ("%s: enqueue record = %d", __func__,
2221 retval));
2222
2223 inm->in6m_state = MLD_LEAVING_MEMBER;
2224 inm->in6m_sctimer = 1;
2225 V_state_change_timers_running6 = 1;
2226 syncstates = 0;
2227 }
2228 break;
2229 }
2230 break;
2231 case MLD_LAZY_MEMBER:
2232 case MLD_SLEEPING_MEMBER:
2233 case MLD_AWAKENING_MEMBER:
2234 /* Our reports are suppressed; do nothing. */
2235 break;
2236 }
2237
2238 if (syncstates) {
2239 in6m_commit(inm);
2240 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2241 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2242 if_name(inm->in6m_ifp));
2243 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2244 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
2245 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
2246 }
2247 }
2248
2249 /*
2250 * Enqueue an MLDv2 group record to the given output queue.
2251 *
2252 * If is_state_change is zero, a current-state record is appended.
2253 * If is_state_change is non-zero, a state-change report is appended.
2254 *
2255 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2256 * If is_group_query is zero, and if there is a packet with free space
2257 * at the tail of the queue, it will be appended to, provided there
2258 * is enough free space.
2259 * Otherwise a new mbuf packet chain is allocated.
2260 *
2261 * If is_source_query is non-zero, each source is checked to see if
2262 * it was recorded for a Group-Source query, and will be omitted if
2263 * it is not both in-mode and recorded.
2264 *
2265 * If use_block_allow is non-zero, state change reports for initial join
2266 * and final leave, on an inclusive mode group with a source list, will be
2267 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2268 *
2269 * The function will attempt to allocate leading space in the packet
2270 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2271 *
2272 * If successful the size of all data appended to the queue is returned,
2273 * otherwise an error code less than zero is returned, or zero if
2274 * no record(s) were appended.
2275 */
2276 static int
2277 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
2278 const int is_state_change, const int is_group_query,
2279 const int is_source_query, const int use_block_allow)
2280 {
2281 struct mldv2_record mr;
2282 struct mldv2_record *pmr;
2283 struct ifnet *ifp;
2284 struct ip6_msource *ims, *nims;
2285 struct mbuf *m0, *m, *md;
2286 int error, is_filter_list_change;
2287 int minrec0len, m0srcs, msrcs, nbytes, off;
2288 int record_has_sources;
2289 int now;
2290 int type;
2291 uint8_t mode;
2292 #ifdef KTR
2293 char ip6tbuf[INET6_ADDRSTRLEN];
2294 #endif
2295
2296 IN6_MULTI_LOCK_ASSERT();
2297
2298 error = 0;
2299 ifp = inm->in6m_ifp;
2300 is_filter_list_change = 0;
2301 m = NULL;
2302 m0 = NULL;
2303 m0srcs = 0;
2304 msrcs = 0;
2305 nbytes = 0;
2306 nims = NULL;
2307 record_has_sources = 1;
2308 pmr = NULL;
2309 type = MLD_DO_NOTHING;
2310 mode = inm->in6m_st[1].iss_fmode;
2311
2312 /*
2313 * If we did not transition out of ASM mode during t0->t1,
2314 * and there are no source nodes to process, we can skip
2315 * the generation of source records.
2316 */
2317 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2318 inm->in6m_nsrc == 0)
2319 record_has_sources = 0;
2320
2321 if (is_state_change) {
2322 /*
2323 * Queue a state change record.
2324 * If the mode did not change, and there are non-ASM
2325 * listeners or source filters present,
2326 * we potentially need to issue two records for the group.
2327 * If there are ASM listeners, and there was no filter
2328 * mode transition of any kind, do nothing.
2329 *
2330 * If we are transitioning to MCAST_UNDEFINED, we need
2331 * not send any sources. A transition to/from this state is
2332 * considered inclusive with some special treatment.
2333 *
2334 * If we are rewriting initial joins/leaves to use
2335 * ALLOW/BLOCK, and the group's membership is inclusive,
2336 * we need to send sources in all cases.
2337 */
2338 if (mode != inm->in6m_st[0].iss_fmode) {
2339 if (mode == MCAST_EXCLUDE) {
2340 CTR1(KTR_MLD, "%s: change to EXCLUDE",
2341 __func__);
2342 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2343 } else {
2344 CTR1(KTR_MLD, "%s: change to INCLUDE",
2345 __func__);
2346 if (use_block_allow) {
2347 /*
2348 * XXX
2349 * Here we're interested in state
2350 * edges either direction between
2351 * MCAST_UNDEFINED and MCAST_INCLUDE.
2352 * Perhaps we should just check
2353 * the group state, rather than
2354 * the filter mode.
2355 */
2356 if (mode == MCAST_UNDEFINED) {
2357 type = MLD_BLOCK_OLD_SOURCES;
2358 } else {
2359 type = MLD_ALLOW_NEW_SOURCES;
2360 }
2361 } else {
2362 type = MLD_CHANGE_TO_INCLUDE_MODE;
2363 if (mode == MCAST_UNDEFINED)
2364 record_has_sources = 0;
2365 }
2366 }
2367 } else {
2368 if (record_has_sources) {
2369 is_filter_list_change = 1;
2370 } else {
2371 type = MLD_DO_NOTHING;
2372 }
2373 }
2374 } else {
2375 /*
2376 * Queue a current state record.
2377 */
2378 if (mode == MCAST_EXCLUDE) {
2379 type = MLD_MODE_IS_EXCLUDE;
2380 } else if (mode == MCAST_INCLUDE) {
2381 type = MLD_MODE_IS_INCLUDE;
2382 KASSERT(inm->in6m_st[1].iss_asm == 0,
2383 ("%s: inm %p is INCLUDE but ASM count is %d",
2384 __func__, inm, inm->in6m_st[1].iss_asm));
2385 }
2386 }
2387
2388 /*
2389 * Generate the filter list changes using a separate function.
2390 */
2391 if (is_filter_list_change)
2392 return (mld_v2_enqueue_filter_change(mq, inm));
2393
2394 if (type == MLD_DO_NOTHING) {
2395 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
2396 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2397 if_name(inm->in6m_ifp));
2398 return (0);
2399 }
2400
2401 /*
2402 * If any sources are present, we must be able to fit at least
2403 * one in the trailing space of the tail packet's mbuf,
2404 * ideally more.
2405 */
2406 minrec0len = sizeof(struct mldv2_record);
2407 if (record_has_sources)
2408 minrec0len += sizeof(struct in6_addr);
2409
2410 CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
2411 mld_rec_type_to_str(type),
2412 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2413 if_name(inm->in6m_ifp));
2414
2415 /*
2416 * Check if we have a packet at the tail of the queue into which
2417 * the first group record for this group will fit.
2418 * Otherwise allocate a new packet.
2419 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2420 * Note: Group records for G/GSR query responses MUST be sent
2421 * in their own packet.
2422 */
2423 m0 = mbufq_last(mq);
2424 if (!is_group_query &&
2425 m0 != NULL &&
2426 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2427 (m0->m_pkthdr.len + minrec0len) <
2428 (ifp->if_mtu - MLD_MTUSPACE)) {
2429 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2430 sizeof(struct mldv2_record)) /
2431 sizeof(struct in6_addr);
2432 m = m0;
2433 CTR1(KTR_MLD, "%s: use existing packet", __func__);
2434 } else {
2435 if (mbufq_full(mq)) {
2436 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2437 return (-ENOMEM);
2438 }
2439 m = NULL;
2440 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2441 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2442 if (!is_state_change && !is_group_query)
2443 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2444 if (m == NULL)
2445 m = m_gethdr(M_NOWAIT, MT_DATA);
2446 if (m == NULL)
2447 return (-ENOMEM);
2448
2449 mld_save_context(m, ifp);
2450
2451 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
2452 }
2453
2454 /*
2455 * Append group record.
2456 * If we have sources, we don't know how many yet.
2457 */
2458 mr.mr_type = type;
2459 mr.mr_datalen = 0;
2460 mr.mr_numsrc = 0;
2461 mr.mr_addr = inm->in6m_addr;
2462 in6_clearscope(&mr.mr_addr);
2463 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2464 if (m != m0)
2465 m_freem(m);
2466 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2467 return (-ENOMEM);
2468 }
2469 nbytes += sizeof(struct mldv2_record);
2470
2471 /*
2472 * Append as many sources as will fit in the first packet.
2473 * If we are appending to a new packet, the chain allocation
2474 * may potentially use clusters; use m_getptr() in this case.
2475 * If we are appending to an existing packet, we need to obtain
2476 * a pointer to the group record after m_append(), in case a new
2477 * mbuf was allocated.
2478 *
2479 * Only append sources which are in-mode at t1. If we are
2480 * transitioning to MCAST_UNDEFINED state on the group, and
2481 * use_block_allow is zero, do not include source entries.
2482 * Otherwise, we need to include this source in the report.
2483 *
2484 * Only report recorded sources in our filter set when responding
2485 * to a group-source query.
2486 */
2487 if (record_has_sources) {
2488 if (m == m0) {
2489 md = m_last(m);
2490 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2491 md->m_len - nbytes);
2492 } else {
2493 md = m_getptr(m, 0, &off);
2494 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2495 off);
2496 }
2497 msrcs = 0;
2498 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2499 nims) {
2500 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2501 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2502 now = im6s_get_mode(inm, ims, 1);
2503 CTR2(KTR_MLD, "%s: node is %d", __func__, now);
2504 if ((now != mode) ||
2505 (now == mode &&
2506 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2507 CTR1(KTR_MLD, "%s: skip node", __func__);
2508 continue;
2509 }
2510 if (is_source_query && ims->im6s_stp == 0) {
2511 CTR1(KTR_MLD, "%s: skip unrecorded node",
2512 __func__);
2513 continue;
2514 }
2515 CTR1(KTR_MLD, "%s: append node", __func__);
2516 if (!m_append(m, sizeof(struct in6_addr),
2517 (void *)&ims->im6s_addr)) {
2518 if (m != m0)
2519 m_freem(m);
2520 CTR1(KTR_MLD, "%s: m_append() failed.",
2521 __func__);
2522 return (-ENOMEM);
2523 }
2524 nbytes += sizeof(struct in6_addr);
2525 ++msrcs;
2526 if (msrcs == m0srcs)
2527 break;
2528 }
2529 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
2530 msrcs);
2531 pmr->mr_numsrc = htons(msrcs);
2532 nbytes += (msrcs * sizeof(struct in6_addr));
2533 }
2534
2535 if (is_source_query && msrcs == 0) {
2536 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
2537 if (m != m0)
2538 m_freem(m);
2539 return (0);
2540 }
2541
2542 /*
2543 * We are good to go with the first packet.
2544 */
2545 if (m != m0) {
2546 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
2547 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2548 mbufq_enqueue(mq, m);
2549 } else
2550 m->m_pkthdr.PH_vt.vt_nrecs++;
2551
2552 /*
2553 * No further work needed if no source list in packet(s).
2554 */
2555 if (!record_has_sources)
2556 return (nbytes);
2557
2558 /*
2559 * Whilst sources remain to be announced, we need to allocate
2560 * a new packet and fill out as many sources as will fit.
2561 * Always try for a cluster first.
2562 */
2563 while (nims != NULL) {
2564 if (mbufq_full(mq)) {
2565 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2566 return (-ENOMEM);
2567 }
2568 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2569 if (m == NULL)
2570 m = m_gethdr(M_NOWAIT, MT_DATA);
2571 if (m == NULL)
2572 return (-ENOMEM);
2573 mld_save_context(m, ifp);
2574 md = m_getptr(m, 0, &off);
2575 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2576 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
2577
2578 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2579 if (m != m0)
2580 m_freem(m);
2581 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2582 return (-ENOMEM);
2583 }
2584 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2585 nbytes += sizeof(struct mldv2_record);
2586
2587 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2588 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2589
2590 msrcs = 0;
2591 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2592 CTR2(KTR_MLD, "%s: visit node %s",
2593 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2594 now = im6s_get_mode(inm, ims, 1);
2595 if ((now != mode) ||
2596 (now == mode &&
2597 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2598 CTR1(KTR_MLD, "%s: skip node", __func__);
2599 continue;
2600 }
2601 if (is_source_query && ims->im6s_stp == 0) {
2602 CTR1(KTR_MLD, "%s: skip unrecorded node",
2603 __func__);
2604 continue;
2605 }
2606 CTR1(KTR_MLD, "%s: append node", __func__);
2607 if (!m_append(m, sizeof(struct in6_addr),
2608 (void *)&ims->im6s_addr)) {
2609 if (m != m0)
2610 m_freem(m);
2611 CTR1(KTR_MLD, "%s: m_append() failed.",
2612 __func__);
2613 return (-ENOMEM);
2614 }
2615 ++msrcs;
2616 if (msrcs == m0srcs)
2617 break;
2618 }
2619 pmr->mr_numsrc = htons(msrcs);
2620 nbytes += (msrcs * sizeof(struct in6_addr));
2621
2622 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
2623 mbufq_enqueue(mq, m);
2624 }
2625
2626 return (nbytes);
2627 }
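/*
 * Editor's sketch (not part of mld6.c): the m0srcs arithmetic above,
 * worked for a common case.  Assuming the usual sizes -- a 40-byte
 * IPv6 header, an 8-byte hop-by-hop Router Alert option and an 8-byte
 * MLDv2 report header (together MLD_MTUSPACE), plus a 20-byte group
 * record -- a fresh packet on a 1500-byte MTU link holds
 * (1500 - 56 - 20) / 16 == 89 source addresses per group record; the
 * tail-packet case substitutes the bytes already consumed.
 */
static int
sources_per_record_sketch(int mtu)
{
	const int mtuspace = 40 + 8 + 8;	/* ip6 + RA + v2 report hdr */
	const int rechdr = 20;			/* mldv2_record w/ group addr */

	return ((mtu - mtuspace - rechdr) / 16);	/* 16: in6_addr */
}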
2628
2629 /*
2630 * Type used to mark record pass completion.
2631 * We exploit the fact we can cast to this easily from the
2632 * current filter modes on each ip_msource node.
2633 */
2634 typedef enum {
2635 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2636 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2637 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2638 REC_FULL = REC_ALLOW | REC_BLOCK
2639 } rectype_t;
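/*
 * Editor's sketch (not part of mld6.c; relies on the rectype_t typedef
 * above): how the filter-change walk below maps a source node's t1
 * mode onto a record type.  MCAST_INCLUDE (1) and MCAST_EXCLUDE (2)
 * cast directly to REC_ALLOW and REC_BLOCK; a node that became
 * MCAST_UNDEFINED (0) at t1 is reported under the inverse of the
 * group's filter mode, exactly as in the loop body.
 */
#include <stdint.h>

static rectype_t
node_rectype_sketch(uint8_t group_mode, uint8_t node_t1_mode)
{
	rectype_t nrt;

	nrt = (rectype_t)node_t1_mode;
	if (nrt == REC_NONE)
		nrt = (rectype_t)(~group_mode & REC_FULL);
	return (nrt);
}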
2640
2641 /*
2642 * Enqueue an MLDv2 filter list change to the given output queue.
2643 *
2644 * Source list filter state is held in an RB-tree. When the filter list
2645 * for a group is changed without changing its mode, we need to compute
2646 * the deltas between T0 and T1 for each source in the filter set,
2647 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2648 *
2649 * As we may potentially queue two record types, and the entire RB-tree
2650 * needs to be walked at once, we break this out into its own function
2651 * so we can generate a tightly packed queue of packets.
2652 *
2653 * XXX This could be written to only use one tree walk, although that makes
2654 * serializing into the mbuf chains a bit harder. For now we do two walks
2655 * which makes things easier on us, and it may or may not be harder on
2656 * the L2 cache.
2657 *
2658 * If successful the size of all data appended to the queue is returned,
2659 * otherwise an error code less than zero is returned, or zero if
2660 * no record(s) were appended.
2661 */
2662 static int
2663 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
2664 {
2665 static const int MINRECLEN =
2666 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2667 struct ifnet *ifp;
2668 struct mldv2_record mr;
2669 struct mldv2_record *pmr;
2670 struct ip6_msource *ims, *nims;
2671 struct mbuf *m, *m0, *md;
2672 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2673 int nallow, nblock;
2674 uint8_t mode, now, then;
2675 rectype_t crt, drt, nrt;
2676 #ifdef KTR
2677 char ip6tbuf[INET6_ADDRSTRLEN];
2678 #endif
2679
2680 IN6_MULTI_LOCK_ASSERT();
2681
2682 if (inm->in6m_nsrc == 0 ||
2683 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2684 return (0);
2685
2686 ifp = inm->in6m_ifp; /* interface */
2687 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
2688 crt = REC_NONE; /* current group record type */
2689 drt = REC_NONE; /* mask of completed group record types */
2690 nrt = REC_NONE; /* record type for current node */
2691 m0srcs = 0; /* # sources which will fit in current mbuf chain */
2692 npbytes = 0; /* # of bytes appended this packet */
2693 nbytes = 0; /* # of bytes appended to group's state-change queue */
2694 rsrcs = 0; /* # sources encoded in current record */
2695 schanged = 0; /* # nodes encoded in overall filter change */
2696 nallow = 0; /* # of source entries in ALLOW_NEW */
2697 nblock = 0; /* # of source entries in BLOCK_OLD */
2698 nims = NULL; /* next tree node pointer */
2699
2700 /*
2701 * Iterate over each possible filter record mode.
2702 * The first kind of source we encounter tells us which
2703 * is the first kind of record we start appending.
2704 * If a node transitioned to UNDEFINED at t1, its mode is treated
2705 * as the inverse of the group's filter mode.
2706 */
2707 while (drt != REC_FULL) {
2708 do {
2709 m0 = mbufq_last(mq);
2710 if (m0 != NULL &&
2711 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2712 MLD_V2_REPORT_MAXRECS) &&
2713 (m0->m_pkthdr.len + MINRECLEN) <
2714 (ifp->if_mtu - MLD_MTUSPACE)) {
2715 m = m0;
2716 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2717 sizeof(struct mldv2_record)) /
2718 sizeof(struct in6_addr);
2719 CTR1(KTR_MLD,
2720 "%s: use previous packet", __func__);
2721 } else {
2722 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2723 if (m == NULL)
2724 m = m_gethdr(M_NOWAIT, MT_DATA);
2725 if (m == NULL) {
2726 CTR1(KTR_MLD,
2727 "%s: m_get*() failed", __func__);
2728 return (-ENOMEM);
2729 }
2730 m->m_pkthdr.PH_vt.vt_nrecs = 0;
2731 mld_save_context(m, ifp);
2732 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2733 sizeof(struct mldv2_record)) /
2734 sizeof(struct in6_addr);
2735 npbytes = 0;
2736 CTR1(KTR_MLD,
2737 "%s: allocated new packet", __func__);
2738 }
2739 /*
2740 * Append the MLD group record header to the
2741 * current packet's data area.
2742 * Recalculate pointer to free space for next
2743 * group record, in case m_append() allocated
2744 * a new mbuf or cluster.
2745 */
2746 memset(&mr, 0, sizeof(mr));
2747 mr.mr_addr = inm->in6m_addr;
2748 in6_clearscope(&mr.mr_addr);
2749 if (!m_append(m, sizeof(mr), (void *)&mr)) {
2750 if (m != m0)
2751 m_freem(m);
2752 CTR1(KTR_MLD,
2753 "%s: m_append() failed", __func__);
2754 return (-ENOMEM);
2755 }
2756 npbytes += sizeof(struct mldv2_record);
2757 if (m != m0) {
2758 /* new packet; offset in chain */
2759 md = m_getptr(m, npbytes -
2760 sizeof(struct mldv2_record), &off);
2761 pmr = (struct mldv2_record *)(mtod(md,
2762 uint8_t *) + off);
2763 } else {
2764 /* current packet; offset from last append */
2765 md = m_last(m);
2766 pmr = (struct mldv2_record *)(mtod(md,
2767 uint8_t *) + md->m_len -
2768 sizeof(struct mldv2_record));
2769 }
2770 /*
2771 * Begin walking the tree for this record type
2772 * pass, or continue from where we left off
2773 * previously if we had to allocate a new packet.
2774 * Only report deltas in-mode at t1.
2775 * We need not report included sources as allowed
2776 * if we are in inclusive mode on the group;
2777 * however, the converse is not true.
2778 */
2779 rsrcs = 0;
2780 if (nims == NULL) {
2781 nims = RB_MIN(ip6_msource_tree,
2782 &inm->in6m_srcs);
2783 }
2784 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2785 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2786 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2787 now = im6s_get_mode(inm, ims, 1);
2788 then = im6s_get_mode(inm, ims, 0);
2789 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
2790 __func__, then, now);
2791 if (now == then) {
2792 CTR1(KTR_MLD,
2793 "%s: skip unchanged", __func__);
2794 continue;
2795 }
2796 if (mode == MCAST_EXCLUDE &&
2797 now == MCAST_INCLUDE) {
2798 CTR1(KTR_MLD,
2799 "%s: skip IN src on EX group",
2800 __func__);
2801 continue;
2802 }
2803 nrt = (rectype_t)now;
2804 if (nrt == REC_NONE)
2805 nrt = (rectype_t)(~mode & REC_FULL);
2806 if (schanged++ == 0) {
2807 crt = nrt;
2808 } else if (crt != nrt)
2809 continue;
2810 if (!m_append(m, sizeof(struct in6_addr),
2811 (void *)&ims->im6s_addr)) {
2812 if (m != m0)
2813 m_freem(m);
2814 CTR1(KTR_MLD,
2815 "%s: m_append() failed", __func__);
2816 return (-ENOMEM);
2817 }
2818 nallow += !!(crt == REC_ALLOW);
2819 nblock += !!(crt == REC_BLOCK);
2820 if (++rsrcs == m0srcs)
2821 break;
2822 }
2823 /*
2824 * If we did not append any tree nodes on this
2825 * pass, back out of allocations.
2826 */
2827 if (rsrcs == 0) {
2828 npbytes -= sizeof(struct mldv2_record);
2829 if (m != m0) {
2830 CTR1(KTR_MLD,
2831 "%s: m_free(m)", __func__);
2832 m_freem(m);
2833 } else {
2834 CTR1(KTR_MLD,
2835 "%s: m_adj(m, -mr)", __func__);
2836 m_adj(m, -((int)sizeof(
2837 struct mldv2_record)));
2838 }
2839 continue;
2840 }
2841 npbytes += (rsrcs * sizeof(struct in6_addr));
2842 if (crt == REC_ALLOW)
2843 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2844 else if (crt == REC_BLOCK)
2845 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2846 pmr->mr_numsrc = htons(rsrcs);
2847 /*
2848 * Count the new group record, and enqueue this
2849 * packet if it wasn't already queued.
2850 */
2851 m->m_pkthdr.PH_vt.vt_nrecs++;
2852 if (m != m0)
2853 mbufq_enqueue(mq, m);
2854 nbytes += npbytes;
2855 } while (nims != NULL);
2856 drt |= crt;
2857 crt = (~crt & REC_FULL);
2858 }
2859
2860 CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2861 nallow, nblock);
2862
2863 return (nbytes);
2864 }
2865
2866 static int
2867 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
2868 {
2869 struct mbufq *gq;
2870 struct mbuf *m; /* pending state-change */
2871 struct mbuf *m0; /* copy of pending state-change */
2872 struct mbuf *mt; /* last state-change in packet */
2873 int docopy, domerge;
2874 u_int recslen;
2875
2876 docopy = 0;
2877 domerge = 0;
2878 recslen = 0;
2879
2880 IN6_MULTI_LOCK_ASSERT();
2881 MLD_LOCK_ASSERT();
2882
2883 /*
2884 * If there are further pending retransmissions, make a writable
2885 * copy of each queued state-change message before merging.
2886 */
2887 if (inm->in6m_scrv > 0)
2888 docopy = 1;
2889
2890 gq = &inm->in6m_scq;
2891 #ifdef KTR
2892 if (mbufq_first(gq) == NULL) {
2893 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
2894 __func__, inm);
2895 }
2896 #endif
2897
2898 m = mbufq_first(gq);
2899 while (m != NULL) {
2900 /*
2901 * Only merge the report into the current packet if
2902 * there is sufficient space to do so; an MLDv2 report
2903 * packet may only contain 65,535 group records.
2904 * Always use a simple mbuf chain concatenation to do this,
2905 * as large state changes for single groups may have
2906 * allocated clusters.
2907 */
2908 domerge = 0;
2909 mt = mbufq_last(scq);
2910 if (mt != NULL) {
2911 recslen = m_length(m, NULL);
2912
2913 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
2914 m->m_pkthdr.PH_vt.vt_nrecs <=
2915 MLD_V2_REPORT_MAXRECS) &&
2916 (mt->m_pkthdr.len + recslen <=
2917 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
2918 domerge = 1;
2919 }
2920
2921 if (!domerge && mbufq_full(gq)) {
2922 CTR2(KTR_MLD,
2923 "%s: outbound queue full, skipping whole packet %p",
2924 __func__, m);
2925 mt = m->m_nextpkt;
2926 if (!docopy)
2927 m_freem(m);
2928 m = mt;
2929 continue;
2930 }
2931
2932 if (!docopy) {
2933 CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
2934 m0 = mbufq_dequeue(gq);
2935 m = m0->m_nextpkt;
2936 } else {
2937 CTR2(KTR_MLD, "%s: copying %p", __func__, m);
2938 m0 = m_dup(m, M_NOWAIT);
2939 if (m0 == NULL)
2940 return (ENOMEM);
2941 m0->m_nextpkt = NULL;
2942 m = m->m_nextpkt;
2943 }
2944
2945 if (!domerge) {
2946 CTR3(KTR_MLD, "%s: queueing %p to scq %p",
2947 __func__, m0, scq);
2948 mbufq_enqueue(scq, m0);
2949 } else {
2950 struct mbuf *mtl; /* last mbuf of packet mt */
2951
2952 CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p",
2953 __func__, m0, mt);
2954
2955 mtl = m_last(mt);
2956 m0->m_flags &= ~M_PKTHDR;
2957 mt->m_pkthdr.len += recslen;
2958 mt->m_pkthdr.PH_vt.vt_nrecs +=
2959 m0->m_pkthdr.PH_vt.vt_nrecs;
2960
2961 mtl->m_next = m0;
2962 }
2963 }
2964
2965 return (0);
2966 }
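/*
 * Editor's sketch (not part of mld6.c): the merge test above as a
 * predicate.  A pending state-change may be folded into the tail
 * packet of scq only if both limits hold: the record count stays
 * within MLD_V2_REPORT_MAXRECS (65,535 per MLDv2 report) and the
 * combined length still leaves MLD_MTUSPACE headroom within the MTU.
 */
static int
can_merge_sketch(unsigned int tail_recs, unsigned int add_recs,
    unsigned int tail_len, unsigned int add_len, unsigned int mtu,
    unsigned int mtuspace, unsigned int maxrecs)
{
	return (tail_recs + add_recs <= maxrecs &&
	    tail_len + add_len <= mtu - mtuspace);
}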
2967
2968 /*
2969 * Respond to a pending MLDv2 General Query.
2970 */
2971 static void
2972 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
2973 {
2974 struct ifmultiaddr *ifma;
2975 struct ifnet *ifp;
2976 struct in6_multi *inm;
2977 int retval;
2978
2979 IN6_MULTI_LOCK_ASSERT();
2980 MLD_LOCK_ASSERT();
2981
2982 KASSERT(mli->mli_version == MLD_VERSION_2,
2983 ("%s: called when version %d", __func__, mli->mli_version));
2984
2985 /*
2986 * Check that there are some packets queued. If so, send them first.
2987 * For a large number of groups the reply to a general query can take
2988 * many packets; we should finish sending them before starting to
2989 * queue the new reply.
2990 */
2991 if (mbufq_len(&mli->mli_gq) != 0)
2992 goto send;
2993
2994 ifp = mli->mli_ifp;
2995
2996 IF_ADDR_RLOCK(ifp);
2997 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2998 if (ifma->ifma_addr->sa_family != AF_INET6 ||
2999 ifma->ifma_protospec == NULL)
3000 continue;
3001
3002 inm = (struct in6_multi *)ifma->ifma_protospec;
3003 KASSERT(ifp == inm->in6m_ifp,
3004 ("%s: inconsistent ifp", __func__));
3005
3006 switch (inm->in6m_state) {
3007 case MLD_NOT_MEMBER:
3008 case MLD_SILENT_MEMBER:
3009 break;
3010 case MLD_REPORTING_MEMBER:
3011 case MLD_IDLE_MEMBER:
3012 case MLD_LAZY_MEMBER:
3013 case MLD_SLEEPING_MEMBER:
3014 case MLD_AWAKENING_MEMBER:
3015 inm->in6m_state = MLD_REPORTING_MEMBER;
3016 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3017 inm, 0, 0, 0, 0);
3018 CTR2(KTR_MLD, "%s: enqueue record = %d",
3019 __func__, retval);
3020 break;
3021 case MLD_G_QUERY_PENDING_MEMBER:
3022 case MLD_SG_QUERY_PENDING_MEMBER:
3023 case MLD_LEAVING_MEMBER:
3024 break;
3025 }
3026 }
3027 IF_ADDR_RUNLOCK(ifp);
3028
3029 send:
3030 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3031
3032 /*
3033 * Slew transmission of bursts over 500ms intervals.
3034 */
3035 if (mbufq_first(&mli->mli_gq) != NULL) {
3036 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3037 MLD_RESPONSE_BURST_INTERVAL);
3038 V_interface_timers_running6 = 1;
3039 }
3040 }
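/*
 * Editor's sketch (not part of mld6.c): the pacing above.  Each pass
 * drains at most MLD_MAX_RESPONSE_BURST packets; if the queue is not
 * yet empty, the interface timer is re-armed to a random point within
 * the burst interval so a large reply is slewed across several ~500ms
 * windows.  Assuming MLD_RANDOM_DELAY(X) is uniform in [1, X]:
 */
#include <stdlib.h>

static int
next_burst_timer_sketch(int burst_interval_ticks)
{
	/* mirrors: 1 + MLD_RANDOM_DELAY(MLD_RESPONSE_BURST_INTERVAL) */
	return (1 + (rand() % burst_interval_ticks + 1));
}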
3041
3042 /*
3043 * Transmit the next pending message in the output queue.
3044 *
3045 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3046 * MRT: Nothing needs to be done, as MLD traffic is always local to
3047 * a link and uses a link-scope multicast address.
3048 */
3049 static void
3050 mld_dispatch_packet(struct mbuf *m)
3051 {
3052 struct ip6_moptions im6o;
3053 struct ifnet *ifp;
3054 struct ifnet *oifp;
3055 struct mbuf *m0;
3056 struct mbuf *md;
3057 struct ip6_hdr *ip6;
3058 struct mld_hdr *mld;
3059 int error;
3060 int off;
3061 int type;
3062 uint32_t ifindex;
3063
3064 CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
3065
3066 /*
3067 * Set VNET image pointer from enqueued mbuf chain
3068 * before doing anything else. Whilst we use interface
3069 * indexes to guard against interface detach, they are
3070 * unique to each VIMAGE and must be retrieved.
3071 */
3072 ifindex = mld_restore_context(m);
3073
3074 /*
3075 * Check if the ifnet still exists. This limits the scope of
3076 * any race in the absence of a global ifp lock for low cost
3077 * (an array lookup).
3078 */
3079 ifp = ifnet_byindex(ifindex);
3080 if (ifp == NULL) {
3081 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
3082 __func__, m, ifindex);
3083 m_freem(m);
3084 IP6STAT_INC(ip6s_noroute);
3085 goto out;
3086 }
3087
3088 im6o.im6o_multicast_hlim = 1;
3089 im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
3090 im6o.im6o_multicast_ifp = ifp;
3091
3092 if (m->m_flags & M_MLDV1) {
3093 m0 = m;
3094 } else {
3095 m0 = mld_v2_encap_report(ifp, m);
3096 if (m0 == NULL) {
3097 CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
3098 IP6STAT_INC(ip6s_odropped);
3099 goto out;
3100 }
3101 }
3102
3103 mld_scrub_context(m0);
3104 m_clrprotoflags(m);
3105 m0->m_pkthdr.rcvif = V_loif;
3106
3107 ip6 = mtod(m0, struct ip6_hdr *);
3108 #if 0
3109 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
3110 #else
3111 /*
3112 * XXX XXX Break some KPI rules to prevent an LOR which would
3113 * occur if we called in6_setscope() at transmission.
3114 * See comments at top of file.
3115 */
3116 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3117 #endif
3118
3119 /*
3120 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3121 * so we can bump the stats.
3122 */
3123 md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3124 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3125 type = mld->mld_type;
3126
3127 oifp = NULL;
3128 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
3129 &oifp, NULL);
3130 if (error) {
3131 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
3132 goto out;
3133 }
3134 ICMP6STAT_INC(icp6s_outhist[type]);
3135 if (oifp != NULL) {
3136 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3137 switch (type) {
3138 case MLD_LISTENER_REPORT:
3139 case MLDV2_LISTENER_REPORT:
3140 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3141 break;
3142 case MLD_LISTENER_DONE:
3143 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3144 break;
3145 }
3146 }
3147 out:
3148 return;
3149 }
3150
3151 /*
3152 * Encapsulate an MLDv2 report.
3153 *
3154 * KAME IPv6 requires that hop-by-hop options be passed separately,
3155 * and that the IPv6 header be prepended in a separate mbuf.
3156 *
3157 * Returns a pointer to the new mbuf chain head, or NULL if the
3158 * allocation failed.
3159 */
3160 static struct mbuf *
3161 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3162 {
3163 struct mbuf *mh;
3164 struct mldv2_report *mld;
3165 struct ip6_hdr *ip6;
3166 struct in6_ifaddr *ia;
3167 int mldreclen;
3168
3169 KASSERT(ifp != NULL, ("%s: null ifp", __func__));
3170 KASSERT((m->m_flags & M_PKTHDR),
3171 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3172
3173 /*
3174 * RFC3590: OK to send as :: or tentative during DAD.
3175 */
3176 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3177 if (ia == NULL)
3178 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
3179
3180 mh = m_gethdr(M_NOWAIT, MT_DATA);
3181 if (mh == NULL) {
3182 if (ia != NULL)
3183 ifa_free(&ia->ia_ifa);
3184 m_freem(m);
3185 return (NULL);
3186 }
3187 M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3188
3189 mldreclen = m_length(m, NULL);
3190 CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
3191
3192 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3193 mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3194 sizeof(struct mldv2_report) + mldreclen;
3195
3196 ip6 = mtod(mh, struct ip6_hdr *);
3197 ip6->ip6_flow = 0;
3198 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3199 ip6->ip6_vfc |= IPV6_VERSION;
3200 ip6->ip6_nxt = IPPROTO_ICMPV6;
3201 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3202 if (ia != NULL)
3203 ifa_free(&ia->ia_ifa);
3204 ip6->ip6_dst = in6addr_linklocal_allv2routers;
3205 /* scope ID will be set in netisr */
3206
3207 mld = (struct mldv2_report *)(ip6 + 1);
3208 mld->mld_type = MLDV2_LISTENER_REPORT;
3209 mld->mld_code = 0;
3210 mld->mld_cksum = 0;
3211 mld->mld_v2_reserved = 0;
3212 mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3213 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3214
3215 mh->m_next = m;
3216 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3217 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3218 return (mh);
3219 }
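/*
 * Editor's sketch (not part of mld6.c): the wire layout produced
 * above, per RFC 3810.  ip6_output() later inserts the hop-by-hop
 * Router Alert option from mld_po (see mld_init() below), giving:
 *
 *	[ ip6_hdr (40) | HBH+RA (8) | report header (8) | records... ]
 *
 * The 8-byte MLDv2 report header, with field names local to this
 * sketch (ICMPv6 type 143, MLDV2_LISTENER_REPORT):
 */
#include <stdint.h>

struct mldv2_report_sketch {
	uint8_t		type;		/* 143 */
	uint8_t		code;		/* always 0 */
	uint16_t	cksum;		/* ICMPv6 pseudo-header checksum */
	uint16_t	reserved;	/* always 0 */
	uint16_t	numrecs;	/* group record count, network order */
};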
3220
3221 #ifdef KTR
3222 static char *
3223 mld_rec_type_to_str(const int type)
3224 {
3225
3226 switch (type) {
3227 case MLD_CHANGE_TO_EXCLUDE_MODE:
3228 return "TO_EX";
3229 break;
3230 case MLD_CHANGE_TO_INCLUDE_MODE:
3231 return "TO_IN";
3232 break;
3233 case MLD_MODE_IS_EXCLUDE:
3234 return "MODE_EX";
3235 break;
3236 case MLD_MODE_IS_INCLUDE:
3237 return "MODE_IN";
3238 break;
3239 case MLD_ALLOW_NEW_SOURCES:
3240 return "ALLOW_NEW";
3241 break;
3242 case MLD_BLOCK_OLD_SOURCES:
3243 return "BLOCK_OLD";
3244 break;
3245 default:
3246 break;
3247 }
3248 return "unknown";
3249 }
3250 #endif
3251
3252 static void
3253 mld_init(void *unused __unused)
3254 {
3255
3256 CTR1(KTR_MLD, "%s: initializing", __func__);
3257 MLD_LOCK_INIT();
3258
3259 ip6_initpktopts(&mld_po);
3260 mld_po.ip6po_hlim = 1;
3261 mld_po.ip6po_hbh = &mld_ra.hbh;
3262 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3263 mld_po.ip6po_flags = IP6PO_DONTFRAG;
3264 }
3265 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
3266
3267 static void
3268 mld_uninit(void *unused __unused)
3269 {
3270
3271 CTR1(KTR_MLD, "%s: tearing down", __func__);
3272 MLD_LOCK_DESTROY();
3273 }
3274 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
3275
3276 static void
3277 vnet_mld_init(const void *unused __unused)
3278 {
3279
3280 CTR1(KTR_MLD, "%s: initializing", __func__);
3281
3282 LIST_INIT(&V_mli_head);
3283 }
3284 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
3285 NULL);
3286
3287 static void
3288 vnet_mld_uninit(const void *unused __unused)
3289 {
3290
3291 /* This can happen if we shut down the network stack. */
3292 CTR1(KTR_MLD, "%s: tearing down", __func__);
3293 }
3294 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
3295 NULL);
3296
3297 static int
3298 mld_modevent(module_t mod, int type, void *unused __unused)
3299 {
3300
3301 switch (type) {
3302 case MOD_LOAD:
3303 case MOD_UNLOAD:
3304 break;
3305 default:
3306 return (EOPNOTSUPP);
3307 }
3308 return (0);
3309 }
3310
3311 static moduledata_t mld_mod = {
3312 "mld",
3313 mld_modevent,
3314 0
3315 };
3316 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);