FreeBSD/Linux Kernel Cross Reference
sys/netinet/igmp.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007-2009 Bruce Simpson.
5 * Copyright (c) 1988 Stephen Deering.
6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * Stephen Deering of Stanford University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
37 */
38
39 /*
40 * Internet Group Management Protocol (IGMP) routines.
41 * [RFC1112, RFC2236, RFC3376]
42 *
43 * Written by Steve Deering, Stanford, May 1988.
44 * Modified by Rosen Sharma, Stanford, Aug 1994.
45 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
46 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
47 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
48 *
49 * MULTICAST Revision: 3.5.1.4
50 */
51
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD: releng/12.0/sys/netinet/igmp.c 336676 2018-07-24 16:35:52Z andrew $");
54
55 #include "opt_ddb.h"
56
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/module.h>
60 #include <sys/malloc.h>
61 #include <sys/mbuf.h>
62 #include <sys/socket.h>
63 #include <sys/protosw.h>
64 #include <sys/kernel.h>
65 #include <sys/lock.h>
66 #include <sys/rmlock.h>
67 #include <sys/sysctl.h>
68 #include <sys/ktr.h>
69 #include <sys/condvar.h>
70
71 #ifdef DDB
72 #include <ddb/ddb.h>
73 #endif
74
75 #include <net/if.h>
76 #include <net/if_var.h>
77 #include <net/netisr.h>
78 #include <net/vnet.h>
79
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/in_systm.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet/ip_options.h>
86 #include <netinet/igmp.h>
87 #include <netinet/igmp_var.h>
88
89 #include <machine/in_cksum.h>
90
91 #include <security/mac/mac_framework.h>
92
93 #ifndef KTR_IGMPV3
94 #define KTR_IGMPV3 KTR_INET
95 #endif
96
97 static struct igmp_ifsoftc *
98 igi_alloc_locked(struct ifnet *);
99 static void igi_delete_locked(const struct ifnet *);
100 static void igmp_dispatch_queue(struct mbufq *, int, const int);
101 static void igmp_fasttimo_vnet(void);
102 static void igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
103 static int igmp_handle_state_change(struct in_multi *,
104 struct igmp_ifsoftc *);
105 static int igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
106 static int igmp_input_v1_query(struct ifnet *, const struct ip *,
107 const struct igmp *);
108 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
109 const struct igmp *);
110 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
111 /*const*/ struct igmpv3 *);
112 static int igmp_input_v3_group_query(struct in_multi *,
113 struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
114 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
115 /*const*/ struct igmp *);
116 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
117 /*const*/ struct igmp *);
118 static void igmp_intr(struct mbuf *);
119 static int igmp_isgroupreported(const struct in_addr);
120 static struct mbuf *
121 igmp_ra_alloc(void);
122 #ifdef KTR
123 static char * igmp_rec_type_to_str(const int);
124 #endif
125 static void igmp_set_version(struct igmp_ifsoftc *, const int);
126 static void igmp_slowtimo_vnet(void);
127 static int igmp_v1v2_queue_report(struct in_multi *, const int);
128 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
129 static void igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
130 static void igmp_v2_update_group(struct in_multi *, const int);
131 static void igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
132 static void igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
133 static struct mbuf *
134 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
135 static int igmp_v3_enqueue_group_record(struct mbufq *,
136 struct in_multi *, const int, const int, const int);
137 static int igmp_v3_enqueue_filter_change(struct mbufq *,
138 struct in_multi *);
139 static void igmp_v3_process_group_timers(struct in_multi_head *,
140 struct mbufq *, struct mbufq *, struct in_multi *,
141 const int);
142 static int igmp_v3_merge_state_changes(struct in_multi *,
143 struct mbufq *);
144 static void igmp_v3_suppress_group_record(struct in_multi *);
145 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
146 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
147 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
148
149 static const struct netisr_handler igmp_nh = {
150 .nh_name = "igmp",
151 .nh_handler = igmp_intr,
152 .nh_proto = NETISR_IGMP,
153 .nh_policy = NETISR_POLICY_SOURCE,
154 };
155
156 /*
157 * System-wide globals.
158 *
159 * Unlocked access to these is OK, except for the global IGMP output
160 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
161 * because all VIMAGEs have to share a global output queue, as netisrs
162 * themselves are not virtualized.
163 *
164 * Locking:
165 * * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
166 * Any may be taken independently; if any are held at the same
167 * time, the above lock order must be followed.
168 * * All output is delegated to the netisr.
169 * Now that Giant has been eliminated, the netisr may be inlined.
170 * * IN_MULTI_LIST_LOCK covers in_multi.
171 * * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
172 * including the output queue.
173 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
174 * per-link state iterators.
175 * * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
176 * therefore it is not refcounted.
177 * We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
178 *
179 * Reference counting
180 * * IGMP acquires its own reference every time an in_multi is passed to
181 * it and the group is being joined for the first time.
182 * * IGMP releases its reference(s) on in_multi in a deferred way,
183 * because the operations which process the release run as part of
184 * a loop whose control variables are directly affected by the release
185 * (that, and not recursing on the IF_ADDR_LOCK).
186 *
187 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
188 * to a vnet in ifp->if_vnet.
189 *
190 * SMPng: XXX We may potentially race operations on ifma_protospec.
191 * The problem is that we currently lack a clean way of taking the
192 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
193 * as anything which modifies ifma needs to be covered by that lock.
194 * So check for ifma_protospec being NULL before proceeding.
195 */
196 struct mtx igmp_mtx;
197
198 struct mbuf *m_raopt; /* Router Alert option */
199 static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
200
201 /*
202 * VIMAGE-wide globals.
203 *
204 * The IGMPv3 timers themselves need to run per-image, however,
205 * protosw timers run globally (see tcp).
206 * An ifnet can only be in one vimage at a time, and the loopback
207 * ifnet, loif, is itself virtualized.
208 * It would otherwise be possible to seriously hose IGMP state,
209 * and create inconsistencies in upstream multicast routing, if you have
210 * multiple VIMAGEs running on the same link joining different multicast
211 * groups, UNLESS the "primary IP address" is different. This is because
212 * IGMP for IPv4 does not force link-local addresses to be used for each
213 * node, unlike MLD for IPv6.
214 * Obviously the IGMPv3 per-interface state has per-vimage granularity
215 * also as a result.
216 *
217 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
218 * policy to control the address used by IGMP on the link.
219 */
220 VNET_DEFINE_STATIC(int, interface_timers_running); /* IGMPv3 general
221 * query response */
222 VNET_DEFINE_STATIC(int, state_change_timers_running); /* IGMPv3 state-change
223 * retransmit */
224 VNET_DEFINE_STATIC(int, current_state_timers_running); /* IGMPv1/v2 host
225 * report; IGMPv3 g/sg
226 * query response */
227
228 #define V_interface_timers_running VNET(interface_timers_running)
229 #define V_state_change_timers_running VNET(state_change_timers_running)
230 #define V_current_state_timers_running VNET(current_state_timers_running)
231
232 VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
233 LIST_HEAD_INITIALIZER(igi_head);
234 VNET_DEFINE_STATIC(struct igmpstat, igmpstat) = {
235 .igps_version = IGPS_VERSION_3,
236 .igps_len = sizeof(struct igmpstat),
237 };
238 VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};
239
240 #define V_igi_head VNET(igi_head)
241 #define V_igmpstat VNET(igmpstat)
242 #define V_igmp_gsrdelay VNET(igmp_gsrdelay)
243
244 VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
245 VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
246 VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
247 VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
248 VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
249 VNET_DEFINE_STATIC(int, igmp_legacysupp);
250 VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;
251
252 #define V_igmp_recvifkludge VNET(igmp_recvifkludge)
253 #define V_igmp_sendra VNET(igmp_sendra)
254 #define V_igmp_sendlocal VNET(igmp_sendlocal)
255 #define V_igmp_v1enable VNET(igmp_v1enable)
256 #define V_igmp_v2enable VNET(igmp_v2enable)
257 #define V_igmp_legacysupp VNET(igmp_legacysupp)
258 #define V_igmp_default_version VNET(igmp_default_version)
259
260 /*
261 * Virtualized sysctls.
262 */
263 SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_VNET | CTLFLAG_RW,
264 &VNET_NAME(igmpstat), igmpstat, "");
265 SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
266 &VNET_NAME(igmp_recvifkludge), 0,
267 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
268 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
269 &VNET_NAME(igmp_sendra), 0,
270 "Send IP Router Alert option in IGMPv2/v3 messages");
271 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
272 &VNET_NAME(igmp_sendlocal), 0,
273 "Send IGMP membership reports for 224.0.0.0/24 groups");
274 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
275 &VNET_NAME(igmp_v1enable), 0,
276 "Enable backwards compatibility with IGMPv1");
277 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
278 &VNET_NAME(igmp_v2enable), 0,
279 "Enable backwards compatibility with IGMPv2");
280 SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
281 &VNET_NAME(igmp_legacysupp), 0,
282 "Allow v1/v2 reports to suppress v3 group responses");
283 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
284 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
285 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
286 "Default version of IGMP to run on each interface");
287 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
288 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
289 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
290 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
291
292 /*
293 * Non-virtualized sysctls.
294 */
295 static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
296 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
297 "Per-interface IGMPv3 state");
298
299 static __inline void
300 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
301 {
302
303 #ifdef VIMAGE
304 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
305 #endif /* VIMAGE */
306 m->m_pkthdr.flowid = ifp->if_index;
307 }
308
309 static __inline void
310 igmp_scrub_context(struct mbuf *m)
311 {
312
313 m->m_pkthdr.PH_loc.ptr = NULL;
314 m->m_pkthdr.flowid = 0;
315 }
316
317 /*
318 * Restore context from a queued IGMP output chain.
319 * Return saved ifindex.
320 *
321 * VIMAGE: The assertion is there to make sure that we
322 * actually called CURVNET_SET() with what's in the mbuf chain.
323 */
324 static __inline uint32_t
325 igmp_restore_context(struct mbuf *m)
326 {
327
328 #ifdef notyet
329 #if defined(VIMAGE) && defined(INVARIANTS)
330 KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
331 ("%s: called when curvnet was not restored", __func__));
332 #endif
333 #endif
334 return (m->m_pkthdr.flowid);
335 }
336
337 /*
338 * Retrieve or set default IGMP version.
339 *
340 * VIMAGE: Assume curvnet set by caller.
341 * SMPng: NOTE: Serialized by IGMP lock.
342 */
343 static int
344 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
345 {
346 int error;
347 int new;
348
349 error = sysctl_wire_old_buffer(req, sizeof(int));
350 if (error)
351 return (error);
352
353 IGMP_LOCK();
354
355 new = V_igmp_default_version;
356
357 error = sysctl_handle_int(oidp, &new, 0, req);
358 if (error || !req->newptr)
359 goto out_locked;
360
361 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
362 error = EINVAL;
363 goto out_locked;
364 }
365
366 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
367 V_igmp_default_version, new);
368
369 V_igmp_default_version = new;
370
371 out_locked:
372 IGMP_UNLOCK();
373 return (error);
374 }
375
376 /*
377 * Retrieve or set threshold between group-source queries in seconds.
378 *
379 * VIMAGE: Assume curvnet set by caller.
380 * SMPng: NOTE: Serialized by IGMP lock.
381 */
382 static int
383 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
384 {
385 int error;
386 int i;
387
388 error = sysctl_wire_old_buffer(req, sizeof(int));
389 if (error)
390 return (error);
391
392 IGMP_LOCK();
393
394 i = V_igmp_gsrdelay.tv_sec;
395
396 error = sysctl_handle_int(oidp, &i, 0, req);
397 if (error || !req->newptr)
398 goto out_locked;
399
400 if (i < -1 || i >= 60) {
401 error = EINVAL;
402 goto out_locked;
403 }
404
405 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
406 V_igmp_gsrdelay.tv_sec, i);
407 V_igmp_gsrdelay.tv_sec = i;
408
409 out_locked:
410 IGMP_UNLOCK();
411 return (error);
412 }
413
414 /*
415 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
416 * For use by ifmcstat(8).
417 *
418 * SMPng: NOTE: Does an unlocked ifindex space read.
419 * VIMAGE: Assume curvnet set by caller. The node handler itself
420 * is not directly virtualized.
421 */
422 static int
423 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
424 {
425 int *name;
426 int error;
427 u_int namelen;
428 struct ifnet *ifp;
429 struct igmp_ifsoftc *igi;
430
431 name = (int *)arg1;
432 namelen = arg2;
433
434 if (req->newptr != NULL)
435 return (EPERM);
436
437 if (namelen != 1)
438 return (EINVAL);
439
440 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
441 if (error)
442 return (error);
443
444 IN_MULTI_LIST_LOCK();
445 IGMP_LOCK();
446
447 if (name[0] <= 0 || name[0] > V_if_index) {
448 error = ENOENT;
449 goto out_locked;
450 }
451
452 error = ENOENT;
453
454 ifp = ifnet_byindex(name[0]);
455 if (ifp == NULL)
456 goto out_locked;
457
458 LIST_FOREACH(igi, &V_igi_head, igi_link) {
459 if (ifp == igi->igi_ifp) {
460 struct igmp_ifinfo info;
461
462 info.igi_version = igi->igi_version;
463 info.igi_v1_timer = igi->igi_v1_timer;
464 info.igi_v2_timer = igi->igi_v2_timer;
465 info.igi_v3_timer = igi->igi_v3_timer;
466 info.igi_flags = igi->igi_flags;
467 info.igi_rv = igi->igi_rv;
468 info.igi_qi = igi->igi_qi;
469 info.igi_qri = igi->igi_qri;
470 info.igi_uri = igi->igi_uri;
471 error = SYSCTL_OUT(req, &info, sizeof(info));
472 break;
473 }
474 }
475
476 out_locked:
477 IGMP_UNLOCK();
478 IN_MULTI_LIST_UNLOCK();
479 return (error);
480 }
481
482 /*
483 * Dispatch an entire queue of pending packet chains
484 * using the netisr.
485 * VIMAGE: Assumes the vnet pointer has been set.
486 */
487 static void
488 igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
489 {
490 struct mbuf *m;
491
492 while ((m = mbufq_dequeue(mq)) != NULL) {
493 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, mq, m);
494 if (loop)
495 m->m_flags |= M_IGMP_LOOP;
496 netisr_dispatch(NETISR_IGMP, m);
497 if (--limit == 0)
498 break;
499 }
500 }
501
502 /*
503 * Filter outgoing IGMP report state by group.
504 *
505 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
506 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
507 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
508 * this may break certain IGMP snooping switches which rely on the old
509 * report behaviour.
510 *
511 * Return zero if the given group is one for which IGMP reports
512 * should be suppressed, or non-zero if reports should be issued.
513 */
514 static __inline int
515 igmp_isgroupreported(const struct in_addr addr)
516 {
517
518 if (in_allhosts(addr) ||
519 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
520 return (0);
521
522 return (1);
523 }
524
525 /*
526 * Construct a Router Alert option to use in outgoing packets.
527 */
528 static struct mbuf *
529 igmp_ra_alloc(void)
530 {
531 struct mbuf *m;
532 struct ipoption *p;
533
534 m = m_get(M_WAITOK, MT_DATA);
535 p = mtod(m, struct ipoption *);
536 p->ipopt_dst.s_addr = INADDR_ANY;
537 p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */
538 p->ipopt_list[1] = 0x04; /* 4 bytes long */
539 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
540 p->ipopt_list[3] = 0x00; /* pad byte */
541 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
542
543 return (m);
544 }
545
546 /*
547 * Attach IGMP when PF_INET is attached to an interface.
548 */
549 struct igmp_ifsoftc *
550 igmp_domifattach(struct ifnet *ifp)
551 {
552 struct igmp_ifsoftc *igi;
553
554 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
555 __func__, ifp, ifp->if_xname);
556
557 IGMP_LOCK();
558
559 igi = igi_alloc_locked(ifp);
560 if (!(ifp->if_flags & IFF_MULTICAST))
561 igi->igi_flags |= IGIF_SILENT;
562
563 IGMP_UNLOCK();
564
565 return (igi);
566 }
567
568 /*
569 * VIMAGE: assume curvnet set by caller.
570 */
571 static struct igmp_ifsoftc *
572 igi_alloc_locked(/*const*/ struct ifnet *ifp)
573 {
574 struct igmp_ifsoftc *igi;
575
576 IGMP_LOCK_ASSERT();
577
578 igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
579 if (igi == NULL)
580 goto out;
581
582 igi->igi_ifp = ifp;
583 igi->igi_version = V_igmp_default_version;
584 igi->igi_flags = 0;
585 igi->igi_rv = IGMP_RV_INIT;
586 igi->igi_qi = IGMP_QI_INIT;
587 igi->igi_qri = IGMP_QRI_INIT;
588 igi->igi_uri = IGMP_URI_INIT;
589 mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
590
591 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
592
593 CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
594 ifp, ifp->if_xname);
595
596 out:
597 return (igi);
598 }
599
600 /*
601 * Hook for ifdetach.
602 *
603 * NOTE: Some finalization tasks need to run before the protocol domain
604 * is detached, but also before the link layer does its cleanup.
605 *
606 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
607 * XXX This is also bitten by unlocked ifma_protospec access.
608 */
609 void
610 igmp_ifdetach(struct ifnet *ifp)
611 {
612 struct igmp_ifsoftc *igi;
613 struct ifmultiaddr *ifma, *next;
614 struct in_multi *inm;
615 struct in_multi_head inm_free_tmp;
616 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
617 ifp->if_xname);
618
619 SLIST_INIT(&inm_free_tmp);
620 IGMP_LOCK();
621
622 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
623 if (igi->igi_version == IGMP_VERSION_3) {
624 IF_ADDR_WLOCK(ifp);
625 restart:
626 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
627 if (ifma->ifma_addr->sa_family != AF_INET ||
628 ifma->ifma_protospec == NULL)
629 continue;
630 inm = (struct in_multi *)ifma->ifma_protospec;
631 if (inm->inm_state == IGMP_LEAVING_MEMBER)
632 inm_rele_locked(&inm_free_tmp, inm);
633 inm_clear_recorded(inm);
634 if (__predict_false(ifma_restart)) {
635 ifma_restart = false;
636 goto restart;
637 }
638 }
639 IF_ADDR_WUNLOCK(ifp);
640 inm_release_list_deferred(&inm_free_tmp);
641 }
642 IGMP_UNLOCK();
643
644 }
645
646 /*
647 * Hook for domifdetach.
648 */
649 void
650 igmp_domifdetach(struct ifnet *ifp)
651 {
652
653 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
654 __func__, ifp, ifp->if_xname);
655
656 IGMP_LOCK();
657 igi_delete_locked(ifp);
658 IGMP_UNLOCK();
659 }
660
661 static void
662 igi_delete_locked(const struct ifnet *ifp)
663 {
664 struct igmp_ifsoftc *igi, *tigi;
665
666 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
667 __func__, ifp, ifp->if_xname);
668
669 IGMP_LOCK_ASSERT();
670
671 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
672 if (igi->igi_ifp == ifp) {
673 /*
674 * Free deferred General Query responses.
675 */
676 mbufq_drain(&igi->igi_gq);
677
678 LIST_REMOVE(igi, igi_link);
679 free(igi, M_IGMP);
680 return;
681 }
682 }
683 }
684
685 /*
686 * Process a received IGMPv1 query.
687 * Return non-zero if the message should be dropped.
688 *
689 * VIMAGE: The curvnet pointer is derived from the input ifp.
690 */
691 static int
692 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
693 const struct igmp *igmp)
694 {
695 struct ifmultiaddr *ifma;
696 struct igmp_ifsoftc *igi;
697 struct in_multi *inm;
698
699 /*
700 * IGMPv1 Host Mmembership Queries SHOULD always be addressed to
701 * 224.0.0.1. They are always treated as General Queries.
702 * igmp_group is always ignored. Do not drop it as a userland
703 * daemon may wish to see it.
704 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
705 */
706 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
707 IGMPSTAT_INC(igps_rcv_badqueries);
708 return (0);
709 }
710 IGMPSTAT_INC(igps_rcv_gen_queries);
711
712 IN_MULTI_LIST_LOCK();
713 IGMP_LOCK();
714
715 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
716 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
717
718 if (igi->igi_flags & IGIF_LOOPBACK) {
719 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
720 ifp, ifp->if_xname);
721 goto out_locked;
722 }
723
724 /*
725 * Switch to IGMPv1 host compatibility mode.
726 */
727 igmp_set_version(igi, IGMP_VERSION_1);
728
729 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
730
731 /*
732 * Start the timers in all of our group records
733 * for the interface on which the query arrived,
734 * except those which are already running.
735 */
736 IF_ADDR_RLOCK(ifp);
737 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
738 if (ifma->ifma_addr->sa_family != AF_INET ||
739 ifma->ifma_protospec == NULL)
740 continue;
741 inm = (struct in_multi *)ifma->ifma_protospec;
742 if (inm->inm_timer != 0)
743 continue;
744 switch (inm->inm_state) {
745 case IGMP_NOT_MEMBER:
746 case IGMP_SILENT_MEMBER:
747 break;
748 case IGMP_G_QUERY_PENDING_MEMBER:
749 case IGMP_SG_QUERY_PENDING_MEMBER:
750 case IGMP_REPORTING_MEMBER:
751 case IGMP_IDLE_MEMBER:
752 case IGMP_LAZY_MEMBER:
753 case IGMP_SLEEPING_MEMBER:
754 case IGMP_AWAKENING_MEMBER:
755 inm->inm_state = IGMP_REPORTING_MEMBER;
756 inm->inm_timer = IGMP_RANDOM_DELAY(
757 IGMP_V1V2_MAX_RI * PR_FASTHZ);
758 V_current_state_timers_running = 1;
759 break;
760 case IGMP_LEAVING_MEMBER:
761 break;
762 }
763 }
764 IF_ADDR_RUNLOCK(ifp);
765
766 out_locked:
767 IGMP_UNLOCK();
768 IN_MULTI_LIST_UNLOCK();
769
770 return (0);
771 }
772
773 /*
774 * Process a received IGMPv2 general or group-specific query.
775 */
776 static int
777 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
778 const struct igmp *igmp)
779 {
780 struct ifmultiaddr *ifma;
781 struct igmp_ifsoftc *igi;
782 struct in_multi *inm;
783 int is_general_query;
784 uint16_t timer;
785
786 is_general_query = 0;
787
788 /*
789 * Validate address fields upfront.
790 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
791 */
792 if (in_nullhost(igmp->igmp_group)) {
793 /*
794 * IGMPv2 General Query.
795 * If this was not sent to the all-hosts group, ignore it.
796 */
797 if (!in_allhosts(ip->ip_dst))
798 return (0);
799 IGMPSTAT_INC(igps_rcv_gen_queries);
800 is_general_query = 1;
801 } else {
802 /* IGMPv2 Group-Specific Query. */
803 IGMPSTAT_INC(igps_rcv_group_queries);
804 }
805
806 IN_MULTI_LIST_LOCK();
807 IGMP_LOCK();
808
809 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
810 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
811
812 if (igi->igi_flags & IGIF_LOOPBACK) {
813 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
814 ifp, ifp->if_xname);
815 goto out_locked;
816 }
817
818 /*
819 * Ignore v2 query if in v1 Compatibility Mode.
820 */
821 if (igi->igi_version == IGMP_VERSION_1)
822 goto out_locked;
823
824 igmp_set_version(igi, IGMP_VERSION_2);
825
826 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
827 if (timer == 0)
828 timer = 1;
829
830 if (is_general_query) {
831 /*
832 * For each reporting group joined on this
833 * interface, kick the report timer.
834 */
835 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
836 ifp, ifp->if_xname);
837 IF_ADDR_RLOCK(ifp);
838 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
839 if (ifma->ifma_addr->sa_family != AF_INET ||
840 ifma->ifma_protospec == NULL)
841 continue;
842 inm = (struct in_multi *)ifma->ifma_protospec;
843 igmp_v2_update_group(inm, timer);
844 }
845 IF_ADDR_RUNLOCK(ifp);
846 } else {
847 /*
848 * Group-specific IGMPv2 query, we need only
849 * look up the single group to process it.
850 */
851 inm = inm_lookup(ifp, igmp->igmp_group);
852 if (inm != NULL) {
853 CTR3(KTR_IGMPV3,
854 "process v2 query 0x%08x on ifp %p(%s)",
855 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
856 igmp_v2_update_group(inm, timer);
857 }
858 }
859
860 out_locked:
861 IGMP_UNLOCK();
862 IN_MULTI_LIST_UNLOCK();
863
864 return (0);
865 }
866
867 /*
868 * Update the report timer on a group in response to an IGMPv2 query.
869 *
870 * If we are becoming the reporting member for this group, start the timer.
871 * If we already are the reporting member for this group, and timer is
872 * below the threshold, reset it.
873 *
874 * We may be updating the group for the first time since we switched
875 * to IGMPv3. If we are, then we must clear any recorded source lists,
876 * and transition to REPORTING state; the group timer is overloaded
877 * for group and group-source query responses.
878 *
879 * Unlike IGMPv3, the delay per group should be jittered
880 * to avoid bursts of IGMPv2 reports.
881 */
882 static void
883 igmp_v2_update_group(struct in_multi *inm, const int timer)
884 {
885
886 CTR4(KTR_IGMPV3, "0x%08x: %s/%s timer=%d", __func__,
887 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
888
889 IN_MULTI_LIST_LOCK_ASSERT();
890
891 switch (inm->inm_state) {
892 case IGMP_NOT_MEMBER:
893 case IGMP_SILENT_MEMBER:
894 break;
895 case IGMP_REPORTING_MEMBER:
896 if (inm->inm_timer != 0 &&
897 inm->inm_timer <= timer) {
898 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
899 "skipping.", __func__);
900 break;
901 }
902 /* FALLTHROUGH */
903 case IGMP_SG_QUERY_PENDING_MEMBER:
904 case IGMP_G_QUERY_PENDING_MEMBER:
905 case IGMP_IDLE_MEMBER:
906 case IGMP_LAZY_MEMBER:
907 case IGMP_AWAKENING_MEMBER:
908 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
909 inm->inm_state = IGMP_REPORTING_MEMBER;
910 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
911 V_current_state_timers_running = 1;
912 break;
913 case IGMP_SLEEPING_MEMBER:
914 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
915 inm->inm_state = IGMP_AWAKENING_MEMBER;
916 break;
917 case IGMP_LEAVING_MEMBER:
918 break;
919 }
920 }
921
922 /*
923 * Process a received IGMPv3 general, group-specific or
924 * group-and-source-specific query.
925 * Assumes m has already been pulled up to the full IGMP message length.
926 * Return 0 if successful, otherwise an appropriate error code is returned.
927 */
928 static int
929 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
930 /*const*/ struct igmpv3 *igmpv3)
931 {
932 struct igmp_ifsoftc *igi;
933 struct in_multi *inm;
934 int is_general_query;
935 uint32_t maxresp, nsrc, qqi;
936 uint16_t timer;
937 uint8_t qrv;
938
939 is_general_query = 0;
940
941 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
942
943 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
944 if (maxresp >= 128) {
945 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
946 (IGMP_EXP(igmpv3->igmp_code) + 3);
947 }
948
949 /*
950 * Robustness must never be less than 2 for on-wire IGMPv3.
951 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
952 * an exception for interfaces whose IGMPv3 state changes
953 * are redirected to loopback (e.g. MANET).
954 */
955 qrv = IGMP_QRV(igmpv3->igmp_misc);
956 if (qrv < 2) {
957 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
958 qrv, IGMP_RV_INIT);
959 qrv = IGMP_RV_INIT;
960 }
961
962 qqi = igmpv3->igmp_qqi;
963 if (qqi >= 128) {
964 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
965 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
966 }
967
968 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
969 if (timer == 0)
970 timer = 1;
971
972 nsrc = ntohs(igmpv3->igmp_numsrc);
973
974 /*
975 * Validate address fields and versions upfront before
976 * accepting v3 query.
977 * XXX SMPng: Unlocked access to igmpstat counters here.
978 */
979 if (in_nullhost(igmpv3->igmp_group)) {
980 /*
981 * IGMPv3 General Query.
982 *
983 * General Queries SHOULD be directed to 224.0.0.1.
984 * A general query with a source list has undefined
985 * behaviour; discard it.
986 */
987 IGMPSTAT_INC(igps_rcv_gen_queries);
988 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
989 IGMPSTAT_INC(igps_rcv_badqueries);
990 return (0);
991 }
992 is_general_query = 1;
993 } else {
994 /* Group or group-source specific query. */
995 if (nsrc == 0)
996 IGMPSTAT_INC(igps_rcv_group_queries);
997 else
998 IGMPSTAT_INC(igps_rcv_gsr_queries);
999 }
1000
1001 IN_MULTI_LIST_LOCK();
1002 IGMP_LOCK();
1003
1004 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1005 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
1006
1007 if (igi->igi_flags & IGIF_LOOPBACK) {
1008 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1009 ifp, ifp->if_xname);
1010 goto out_locked;
1011 }
1012
1013 /*
1014 * Discard the v3 query if we're in Compatibility Mode.
1015 * The RFC is not obviously worded that hosts need to stay in
1016 * compatibility mode until the Old Version Querier Present
1017 * timer expires.
1018 */
1019 if (igi->igi_version != IGMP_VERSION_3) {
1020 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1021 igi->igi_version, ifp, ifp->if_xname);
1022 goto out_locked;
1023 }
1024
1025 igmp_set_version(igi, IGMP_VERSION_3);
1026 igi->igi_rv = qrv;
1027 igi->igi_qi = qqi;
1028 igi->igi_qri = maxresp;
1029
1030 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1031 maxresp);
1032
1033 if (is_general_query) {
1034 /*
1035 * Schedule a current-state report on this ifp for
1036 * all groups, possibly containing source lists.
1037 * If there is a pending General Query response
1038 * scheduled earlier than the selected delay, do
1039 * not schedule any other reports.
1040 * Otherwise, reset the interface timer.
1041 */
1042 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1043 ifp, ifp->if_xname);
1044 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1045 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1046 V_interface_timers_running = 1;
1047 }
1048 } else {
1049 /*
1050 * Group-source-specific queries are throttled on
1051 * a per-group basis to defeat denial-of-service attempts.
1052 * Queries for groups we are not a member of on this
1053 * link are simply ignored.
1054 */
1055 inm = inm_lookup(ifp, igmpv3->igmp_group);
1056 if (inm == NULL)
1057 goto out_locked;
1058 if (nsrc > 0) {
1059 if (!ratecheck(&inm->inm_lastgsrtv,
1060 &V_igmp_gsrdelay)) {
1061 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1062 __func__);
1063 IGMPSTAT_INC(igps_drop_gsr_queries);
1064 goto out_locked;
1065 }
1066 }
1067 CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
1068 ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
1069 /*
1070 * If there is a pending General Query response
1071 * scheduled sooner than the selected delay, no
1072 * further report need be scheduled.
1073 * Otherwise, prepare to respond to the
1074 * group-specific or group-and-source query.
1075 */
1076 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1077 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1078 }
1079
1080 out_locked:
1081 IGMP_UNLOCK();
1082 IN_MULTI_LIST_UNLOCK();
1083
1084 return (0);
1085 }
1086
1087 /*
1088 * Process a received IGMPv3 group-specific or group-and-source-specific
1089 * query.
1090 * Return <0 if any error occurred. Currently this is ignored.
1091 */
1092 static int
1093 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
1094 int timer, /*const*/ struct igmpv3 *igmpv3)
1095 {
1096 int retval;
1097 uint16_t nsrc;
1098
1099 IN_MULTI_LIST_LOCK_ASSERT();
1100 IGMP_LOCK_ASSERT();
1101
1102 retval = 0;
1103
1104 switch (inm->inm_state) {
1105 case IGMP_NOT_MEMBER:
1106 case IGMP_SILENT_MEMBER:
1107 case IGMP_SLEEPING_MEMBER:
1108 case IGMP_LAZY_MEMBER:
1109 case IGMP_AWAKENING_MEMBER:
1110 case IGMP_IDLE_MEMBER:
1111 case IGMP_LEAVING_MEMBER:
1112 return (retval);
1113 break;
1114 case IGMP_REPORTING_MEMBER:
1115 case IGMP_G_QUERY_PENDING_MEMBER:
1116 case IGMP_SG_QUERY_PENDING_MEMBER:
1117 break;
1118 }
1119
1120 nsrc = ntohs(igmpv3->igmp_numsrc);
1121
1122 /*
1123 * Deal with group-specific queries upfront.
1124 * If any group query is already pending, purge any recorded
1125 * source-list state if it exists, and schedule a query response
1126 * for this group-specific query.
1127 */
1128 if (nsrc == 0) {
1129 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1130 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1131 inm_clear_recorded(inm);
1132 timer = min(inm->inm_timer, timer);
1133 }
1134 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1135 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1136 V_current_state_timers_running = 1;
1137 return (retval);
1138 }
1139
1140 /*
1141 * Deal with the case where a group-and-source-specific query has
1142 * been received but a group-specific query is already pending.
1143 */
1144 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1145 timer = min(inm->inm_timer, timer);
1146 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1147 V_current_state_timers_running = 1;
1148 return (retval);
1149 }
1150
1151 /*
1152 * Finally, deal with the case where a group-and-source-specific
1153 * query has been received, where a response to a previous g-s-r
1154 * query exists, or none exists.
1155 * In this case, we need to parse the source-list which the Querier
1156 * has provided us with and check if we have any source list filter
1157 * entries at T1 for these sources. If we do not, there is no need
1158 * schedule a report and the query may be dropped.
1159 * If we do, we must record them and schedule a current-state
1160 * report for those sources.
1161 * FIXME: Handling source lists larger than 1 mbuf requires that
1162 * we pass the mbuf chain pointer down to this function, and use
1163 * m_getptr() to walk the chain.
1164 */
1165 if (inm->inm_nsrc > 0) {
1166 const struct in_addr *ap;
1167 int i, nrecorded;
1168
1169 ap = (const struct in_addr *)(igmpv3 + 1);
1170 nrecorded = 0;
1171 for (i = 0; i < nsrc; i++, ap++) {
1172 retval = inm_record_source(inm, ap->s_addr);
1173 if (retval < 0)
1174 break;
1175 nrecorded += retval;
1176 }
1177 if (nrecorded > 0) {
1178 CTR1(KTR_IGMPV3,
1179 "%s: schedule response to SG query", __func__);
1180 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1181 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1182 V_current_state_timers_running = 1;
1183 }
1184 }
1185
1186 return (retval);
1187 }
1188
1189 /*
1190 * Process a received IGMPv1 host membership report.
1191 *
1192 * NOTE: 0.0.0.0 workaround breaks const correctness.
1193 */
1194 static int
1195 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1196 /*const*/ struct igmp *igmp)
1197 {
1198 struct rm_priotracker in_ifa_tracker;
1199 struct in_ifaddr *ia;
1200 struct in_multi *inm;
1201
1202 IGMPSTAT_INC(igps_rcv_reports);
1203
1204 if (ifp->if_flags & IFF_LOOPBACK)
1205 return (0);
1206
1207 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1208 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1209 IGMPSTAT_INC(igps_rcv_badreports);
1210 return (EINVAL);
1211 }
1212
1213 /*
1214 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1215 * Booting clients may use the source address 0.0.0.0. Some
1216 * IGMP daemons may not know how to use IP_RECVIF to determine
1217 * the interface upon which this message was received.
1218 * Replace 0.0.0.0 with the subnet address if told to do so.
1219 */
1220 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1221 NET_EPOCH_ENTER();
1222 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1223 if (ia != NULL)
1224 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1225 NET_EPOCH_EXIT();
1226 }
1227
1228 CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
1229 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1230
1231 /*
1232 * IGMPv1 report suppression.
1233 * If we are a member of this group, and our membership should be
1234 * reported, stop our group timer and transition to the 'lazy' state.
1235 */
1236 IN_MULTI_LIST_LOCK();
1237 inm = inm_lookup(ifp, igmp->igmp_group);
1238 if (inm != NULL) {
1239 struct igmp_ifsoftc *igi;
1240
1241 igi = inm->inm_igi;
1242 if (igi == NULL) {
1243 KASSERT(igi != NULL,
1244 ("%s: no igi for ifp %p", __func__, ifp));
1245 goto out_locked;
1246 }
1247
1248 IGMPSTAT_INC(igps_rcv_ourreports);
1249
1250 /*
1251 * If we are in IGMPv3 host mode, do not allow the
1252 * other host's IGMPv1 report to suppress our reports
1253 * unless explicitly configured to do so.
1254 */
1255 if (igi->igi_version == IGMP_VERSION_3) {
1256 if (V_igmp_legacysupp)
1257 igmp_v3_suppress_group_record(inm);
1258 goto out_locked;
1259 }
1260
1261 inm->inm_timer = 0;
1262
1263 switch (inm->inm_state) {
1264 case IGMP_NOT_MEMBER:
1265 case IGMP_SILENT_MEMBER:
1266 break;
1267 case IGMP_IDLE_MEMBER:
1268 case IGMP_LAZY_MEMBER:
1269 case IGMP_AWAKENING_MEMBER:
1270 CTR3(KTR_IGMPV3,
1271 "report suppressed for 0x%08x on ifp %p(%s)",
1272 ntohl(igmp->igmp_group.s_addr), ifp,
1273 ifp->if_xname);
1274 case IGMP_SLEEPING_MEMBER:
1275 inm->inm_state = IGMP_SLEEPING_MEMBER;
1276 break;
1277 case IGMP_REPORTING_MEMBER:
1278 CTR3(KTR_IGMPV3,
1279 "report suppressed for 0x%08x on ifp %p(%s)",
1280 ntohl(igmp->igmp_group.s_addr), ifp,
1281 ifp->if_xname);
1282 if (igi->igi_version == IGMP_VERSION_1)
1283 inm->inm_state = IGMP_LAZY_MEMBER;
1284 else if (igi->igi_version == IGMP_VERSION_2)
1285 inm->inm_state = IGMP_SLEEPING_MEMBER;
1286 break;
1287 case IGMP_G_QUERY_PENDING_MEMBER:
1288 case IGMP_SG_QUERY_PENDING_MEMBER:
1289 case IGMP_LEAVING_MEMBER:
1290 break;
1291 }
1292 }
1293
1294 out_locked:
1295 IN_MULTI_LIST_UNLOCK();
1296
1297 return (0);
1298 }
1299
1300 /*
1301 * Process a received IGMPv2 host membership report.
1302 *
1303 * NOTE: 0.0.0.0 workaround breaks const correctness.
1304 */
1305 static int
1306 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1307 /*const*/ struct igmp *igmp)
1308 {
1309 struct rm_priotracker in_ifa_tracker;
1310 struct in_ifaddr *ia;
1311 struct in_multi *inm;
1312
1313 /*
1314 * Make sure we don't hear our own membership report. Fast
1315 * leave requires knowing that we are the only member of a
1316 * group.
1317 */
1318 NET_EPOCH_ENTER();
1319 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1320 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1321 NET_EPOCH_EXIT();
1322 return (0);
1323 }
1324
1325 IGMPSTAT_INC(igps_rcv_reports);
1326
1327 if (ifp->if_flags & IFF_LOOPBACK) {
1328 NET_EPOCH_EXIT();
1329 return (0);
1330 }
1331
1332 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1333 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1334 NET_EPOCH_EXIT();
1335 IGMPSTAT_INC(igps_rcv_badreports);
1336 return (EINVAL);
1337 }
1338
1339 /*
1340 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1341 * Booting clients may use the source address 0.0.0.0. Some
1342 * IGMP daemons may not know how to use IP_RECVIF to determine
1343 * the interface upon which this message was received.
1344 * Replace 0.0.0.0 with the subnet address if told to do so.
1345 */
1346 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1347 if (ia != NULL)
1348 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1349 }
1350 NET_EPOCH_EXIT();
1351
1352 CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
1353 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1354
1355 /*
1356 * IGMPv2 report suppression.
1357 * If we are a member of this group, and our membership should be
1358 * reported, and our group timer is pending or about to be reset,
1359 * stop our group timer by transitioning to the 'lazy' state.
1360 */
1361 IN_MULTI_LIST_LOCK();
1362 inm = inm_lookup(ifp, igmp->igmp_group);
1363 if (inm != NULL) {
1364 struct igmp_ifsoftc *igi;
1365
1366 igi = inm->inm_igi;
1367 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1368
1369 IGMPSTAT_INC(igps_rcv_ourreports);
1370
1371 /*
1372 * If we are in IGMPv3 host mode, do not allow the
1373 * other host's IGMPv1 report to suppress our reports
1374 * unless explicitly configured to do so.
1375 */
1376 if (igi->igi_version == IGMP_VERSION_3) {
1377 if (V_igmp_legacysupp)
1378 igmp_v3_suppress_group_record(inm);
1379 goto out_locked;
1380 }
1381
1382 inm->inm_timer = 0;
1383
1384 switch (inm->inm_state) {
1385 case IGMP_NOT_MEMBER:
1386 case IGMP_SILENT_MEMBER:
1387 case IGMP_SLEEPING_MEMBER:
1388 break;
1389 case IGMP_REPORTING_MEMBER:
1390 case IGMP_IDLE_MEMBER:
1391 case IGMP_AWAKENING_MEMBER:
1392 CTR3(KTR_IGMPV3,
1393 "report suppressed for 0x%08x on ifp %p(%s)",
1394 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1395 case IGMP_LAZY_MEMBER:
1396 inm->inm_state = IGMP_LAZY_MEMBER;
1397 break;
1398 case IGMP_G_QUERY_PENDING_MEMBER:
1399 case IGMP_SG_QUERY_PENDING_MEMBER:
1400 case IGMP_LEAVING_MEMBER:
1401 break;
1402 }
1403 }
1404
1405 out_locked:
1406 IN_MULTI_LIST_UNLOCK();
1407
1408 return (0);
1409 }
1410
1411 int
1412 igmp_input(struct mbuf **mp, int *offp, int proto)
1413 {
1414 int iphlen;
1415 struct ifnet *ifp;
1416 struct igmp *igmp;
1417 struct ip *ip;
1418 struct mbuf *m;
1419 int igmplen;
1420 int minlen;
1421 int queryver;
1422
1423 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);
1424
1425 m = *mp;
1426 ifp = m->m_pkthdr.rcvif;
1427 *mp = NULL;
1428
1429 IGMPSTAT_INC(igps_rcv_total);
1430
1431 ip = mtod(m, struct ip *);
1432 iphlen = *offp;
1433 igmplen = ntohs(ip->ip_len) - iphlen;
1434
1435 /*
1436 * Validate lengths.
1437 */
1438 if (igmplen < IGMP_MINLEN) {
1439 IGMPSTAT_INC(igps_rcv_tooshort);
1440 m_freem(m);
1441 return (IPPROTO_DONE);
1442 }
1443
1444 /*
1445 * Always pullup to the minimum size for v1/v2 or v3
1446 * to amortize calls to m_pullup().
1447 */
1448 minlen = iphlen;
1449 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1450 minlen += IGMP_V3_QUERY_MINLEN;
1451 else
1452 minlen += IGMP_MINLEN;
1453 if ((!M_WRITABLE(m) || m->m_len < minlen) &&
1454 (m = m_pullup(m, minlen)) == NULL) {
1455 IGMPSTAT_INC(igps_rcv_tooshort);
1456 return (IPPROTO_DONE);
1457 }
1458 ip = mtod(m, struct ip *);
1459
1460 /*
1461 * Validate checksum.
1462 */
1463 m->m_data += iphlen;
1464 m->m_len -= iphlen;
1465 igmp = mtod(m, struct igmp *);
1466 if (in_cksum(m, igmplen)) {
1467 IGMPSTAT_INC(igps_rcv_badsum);
1468 m_freem(m);
1469 return (IPPROTO_DONE);
1470 }
1471 m->m_data -= iphlen;
1472 m->m_len += iphlen;
1473
1474 /*
1475 * IGMP control traffic is link-scope, and must have a TTL of 1.
1476 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1477 * probe packets may come from beyond the LAN.
1478 */
1479 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1480 IGMPSTAT_INC(igps_rcv_badttl);
1481 m_freem(m);
1482 return (IPPROTO_DONE);
1483 }
1484
1485 switch (igmp->igmp_type) {
1486 case IGMP_HOST_MEMBERSHIP_QUERY:
1487 if (igmplen == IGMP_MINLEN) {
1488 if (igmp->igmp_code == 0)
1489 queryver = IGMP_VERSION_1;
1490 else
1491 queryver = IGMP_VERSION_2;
1492 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1493 queryver = IGMP_VERSION_3;
1494 } else {
1495 IGMPSTAT_INC(igps_rcv_tooshort);
1496 m_freem(m);
1497 return (IPPROTO_DONE);
1498 }
1499
1500 switch (queryver) {
1501 case IGMP_VERSION_1:
1502 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1503 if (!V_igmp_v1enable)
1504 break;
1505 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1506 m_freem(m);
1507 return (IPPROTO_DONE);
1508 }
1509 break;
1510
1511 case IGMP_VERSION_2:
1512 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1513 if (!V_igmp_v2enable)
1514 break;
1515 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1516 m_freem(m);
1517 return (IPPROTO_DONE);
1518 }
1519 break;
1520
1521 case IGMP_VERSION_3: {
1522 struct igmpv3 *igmpv3;
1523 uint16_t igmpv3len;
1524 uint16_t nsrc;
1525
1526 IGMPSTAT_INC(igps_rcv_v3_queries);
1527 igmpv3 = (struct igmpv3 *)igmp;
1528 /*
1529 * Validate length based on source count.
1530 */
1531 nsrc = ntohs(igmpv3->igmp_numsrc);
1532 if (nsrc * sizeof(in_addr_t) >
1533 UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
1534 IGMPSTAT_INC(igps_rcv_tooshort);
1535 return (IPPROTO_DONE);
1536 }
1537 /*
1538 * m_pullup() may modify m, so pullup in
1539 * this scope.
1540 */
1541 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1542 sizeof(struct in_addr) * nsrc;
1543 if ((!M_WRITABLE(m) ||
1544 m->m_len < igmpv3len) &&
1545 (m = m_pullup(m, igmpv3len)) == NULL) {
1546 IGMPSTAT_INC(igps_rcv_tooshort);
1547 return (IPPROTO_DONE);
1548 }
1549 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1550 + iphlen);
1551 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1552 m_freem(m);
1553 return (IPPROTO_DONE);
1554 }
1555 }
1556 break;
1557 }
1558 break;
1559
1560 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1561 if (!V_igmp_v1enable)
1562 break;
1563 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1564 m_freem(m);
1565 return (IPPROTO_DONE);
1566 }
1567 break;
1568
1569 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1570 if (!V_igmp_v2enable)
1571 break;
1572 if (!ip_checkrouteralert(m))
1573 IGMPSTAT_INC(igps_rcv_nora);
1574 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1575 m_freem(m);
1576 return (IPPROTO_DONE);
1577 }
1578 break;
1579
1580 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1581 /*
1582 * Hosts do not need to process IGMPv3 membership reports,
1583 * as report suppression is no longer required.
1584 */
1585 if (!ip_checkrouteralert(m))
1586 IGMPSTAT_INC(igps_rcv_nora);
1587 break;
1588
1589 default:
1590 break;
1591 }
1592
1593 /*
1594 * Pass all valid IGMP packets up to any process(es) listening on a
1595 * raw IGMP socket.
1596 */
1597 *mp = m;
1598 return (rip_input(mp, offp, proto));
1599 }
1600
1601
1602 /*
1603 * Fast timeout handler (global).
1604 * VIMAGE: Timeout handlers are expected to service all vimages.
1605 */
1606 void
1607 igmp_fasttimo(void)
1608 {
1609 VNET_ITERATOR_DECL(vnet_iter);
1610
1611 VNET_LIST_RLOCK_NOSLEEP();
1612 VNET_FOREACH(vnet_iter) {
1613 CURVNET_SET(vnet_iter);
1614 igmp_fasttimo_vnet();
1615 CURVNET_RESTORE();
1616 }
1617 VNET_LIST_RUNLOCK_NOSLEEP();
1618 }
1619
1620 /*
1621 * Fast timeout handler (per-vnet).
1622 * Sends are shuffled off to a netisr to deal with Giant.
1623 *
1624 * VIMAGE: Assume caller has set up our curvnet.
1625 */
1626 static void
1627 igmp_fasttimo_vnet(void)
1628 {
1629 struct mbufq scq; /* State-change packets */
1630 struct mbufq qrq; /* Query response packets */
1631 struct ifnet *ifp;
1632 struct igmp_ifsoftc *igi;
1633 struct ifmultiaddr *ifma, *next;
1634 struct in_multi *inm;
1635 struct in_multi_head inm_free_tmp;
1636 int loop, uri_fasthz;
1637
1638 loop = 0;
1639 uri_fasthz = 0;
1640
1641 /*
1642 * Quick check to see if any work needs to be done, in order to
1643 * minimize the overhead of fasttimo processing.
1644 * SMPng: XXX Unlocked reads.
1645 */
1646 if (!V_current_state_timers_running &&
1647 !V_interface_timers_running &&
1648 !V_state_change_timers_running)
1649 return;
1650
1651 SLIST_INIT(&inm_free_tmp);
1652 IN_MULTI_LIST_LOCK();
1653 IGMP_LOCK();
1654
1655 /*
1656 * IGMPv3 General Query response timer processing.
1657 */
1658 if (V_interface_timers_running) {
1659 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1660
1661 V_interface_timers_running = 0;
1662 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1663 if (igi->igi_v3_timer == 0) {
1664 /* Do nothing. */
1665 } else if (--igi->igi_v3_timer == 0) {
1666 igmp_v3_dispatch_general_query(igi);
1667 } else {
1668 V_interface_timers_running = 1;
1669 }
1670 }
1671 }
1672
1673 if (!V_current_state_timers_running &&
1674 !V_state_change_timers_running)
1675 goto out_locked;
1676
1677 V_current_state_timers_running = 0;
1678 V_state_change_timers_running = 0;
1679
1680 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1681
1682 /*
1683 * IGMPv1/v2/v3 host report and state-change timer processing.
1684 * Note: Processing a v3 group timer may remove a node.
1685 */
1686 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1687 ifp = igi->igi_ifp;
1688
1689 if (igi->igi_version == IGMP_VERSION_3) {
1690 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1691 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1692 PR_FASTHZ);
1693 mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
1694 mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1695 }
1696
1697 IF_ADDR_WLOCK(ifp);
1698 restart:
1699 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
1700 if (ifma->ifma_addr->sa_family != AF_INET ||
1701 ifma->ifma_protospec == NULL)
1702 continue;
1703 inm = (struct in_multi *)ifma->ifma_protospec;
1704 switch (igi->igi_version) {
1705 case IGMP_VERSION_1:
1706 case IGMP_VERSION_2:
1707 igmp_v1v2_process_group_timer(inm,
1708 igi->igi_version);
1709 break;
1710 case IGMP_VERSION_3:
1711 igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
1712 &scq, inm, uri_fasthz);
1713 break;
1714 }
1715 if (__predict_false(ifma_restart)) {
1716 ifma_restart = false;
1717 goto restart;
1718 }
1719 }
1720 IF_ADDR_WUNLOCK(ifp);
1721
1722 if (igi->igi_version == IGMP_VERSION_3) {
1723 igmp_dispatch_queue(&qrq, 0, loop);
1724 igmp_dispatch_queue(&scq, 0, loop);
1725
1726 /*
1727 * Free the in_multi reference(s) for this
1728 * IGMP lifecycle.
1729 */
1730 inm_release_list_deferred(&inm_free_tmp);
1731 }
1732 }
1733
1734 out_locked:
1735 IGMP_UNLOCK();
1736 IN_MULTI_LIST_UNLOCK();
1737 }
1738
1739 /*
1740 * Update host report group timer for IGMPv1/v2.
1741 * Will update the global pending timer flags.
1742 */
1743 static void
1744 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1745 {
1746 int report_timer_expired;
1747
1748 IN_MULTI_LIST_LOCK_ASSERT();
1749 IGMP_LOCK_ASSERT();
1750
1751 if (inm->inm_timer == 0) {
1752 report_timer_expired = 0;
1753 } else if (--inm->inm_timer == 0) {
1754 report_timer_expired = 1;
1755 } else {
1756 V_current_state_timers_running = 1;
1757 return;
1758 }
1759
1760 switch (inm->inm_state) {
1761 case IGMP_NOT_MEMBER:
1762 case IGMP_SILENT_MEMBER:
1763 case IGMP_IDLE_MEMBER:
1764 case IGMP_LAZY_MEMBER:
1765 case IGMP_SLEEPING_MEMBER:
1766 case IGMP_AWAKENING_MEMBER:
1767 break;
1768 case IGMP_REPORTING_MEMBER:
1769 if (report_timer_expired) {
1770 inm->inm_state = IGMP_IDLE_MEMBER;
1771 (void)igmp_v1v2_queue_report(inm,
1772 (version == IGMP_VERSION_2) ?
1773 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1774 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1775 }
1776 break;
1777 case IGMP_G_QUERY_PENDING_MEMBER:
1778 case IGMP_SG_QUERY_PENDING_MEMBER:
1779 case IGMP_LEAVING_MEMBER:
1780 break;
1781 }
1782 }
1783
1784 /*
1785 * Update a group's timers for IGMPv3.
1786 * Will update the global pending timer flags.
1787 * Note: Unlocked read from igi.
1788 */
1789 static void
1790 igmp_v3_process_group_timers(struct in_multi_head *inmh,
1791 struct mbufq *qrq, struct mbufq *scq,
1792 struct in_multi *inm, const int uri_fasthz)
1793 {
1794 int query_response_timer_expired;
1795 int state_change_retransmit_timer_expired;
1796
1797 IN_MULTI_LIST_LOCK_ASSERT();
1798 IGMP_LOCK_ASSERT();
1799
1800 query_response_timer_expired = 0;
1801 state_change_retransmit_timer_expired = 0;
1802
1803 /*
1804 * During a transition from v1/v2 compatibility mode back to v3,
1805 * a group record in REPORTING state may still have its group
1806 * timer active. This is a no-op in this function; it is easier
1807 * to deal with it here than to complicate the slow-timeout path.
1808 */
1809 if (inm->inm_timer == 0) {
1810 query_response_timer_expired = 0;
1811 } else if (--inm->inm_timer == 0) {
1812 query_response_timer_expired = 1;
1813 } else {
1814 V_current_state_timers_running = 1;
1815 }
1816
1817 if (inm->inm_sctimer == 0) {
1818 state_change_retransmit_timer_expired = 0;
1819 } else if (--inm->inm_sctimer == 0) {
1820 state_change_retransmit_timer_expired = 1;
1821 } else {
1822 V_state_change_timers_running = 1;
1823 }
1824
1825 /* We are in fasttimo, so be quick about it. */
1826 if (!state_change_retransmit_timer_expired &&
1827 !query_response_timer_expired)
1828 return;
1829
1830 switch (inm->inm_state) {
1831 case IGMP_NOT_MEMBER:
1832 case IGMP_SILENT_MEMBER:
1833 case IGMP_SLEEPING_MEMBER:
1834 case IGMP_LAZY_MEMBER:
1835 case IGMP_AWAKENING_MEMBER:
1836 case IGMP_IDLE_MEMBER:
1837 break;
1838 case IGMP_G_QUERY_PENDING_MEMBER:
1839 case IGMP_SG_QUERY_PENDING_MEMBER:
1840 /*
1841 * Respond to a previously pending Group-Specific
1842 * or Group-and-Source-Specific query by enqueueing
1843 * the appropriate Current-State report for
1844 * immediate transmission.
1845 */
1846 if (query_response_timer_expired) {
1847 int retval __unused;
1848
1849 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1850 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1851 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1852 __func__, retval);
1853 inm->inm_state = IGMP_REPORTING_MEMBER;
1854 /* XXX Clear recorded sources for next time. */
1855 inm_clear_recorded(inm);
1856 }
1857 /* FALLTHROUGH */
1858 case IGMP_REPORTING_MEMBER:
1859 case IGMP_LEAVING_MEMBER:
1860 if (state_change_retransmit_timer_expired) {
1861 /*
1862 * State-change retransmission timer fired.
1863 * If there are any further pending retransmissions,
1864 * set the global pending state-change flag, and
1865 * reset the timer.
1866 */
1867 if (--inm->inm_scrv > 0) {
1868 inm->inm_sctimer = uri_fasthz;
1869 V_state_change_timers_running = 1;
1870 }
1871 /*
1872 * Retransmit the previously computed state-change
1873 * report. If there are no further pending
1874 * retransmissions, the mbuf queue will be consumed.
1875 * Update T0 state to T1 as we have now sent
1876 * a state-change.
1877 */
1878 (void)igmp_v3_merge_state_changes(inm, scq);
1879
1880 inm_commit(inm);
1881 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
1882 ntohl(inm->inm_addr.s_addr),
1883 inm->inm_ifp->if_xname);
1884
1885 /*
1886 * If we are leaving the group for good, make sure
1887 * we release IGMP's reference to it.
1888 * This release must be deferred using a SLIST,
1889 * as we are called from a loop which traverses
1890 * the in_ifmultiaddr TAILQ.
1891 */
1892 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1893 inm->inm_scrv == 0) {
1894 inm->inm_state = IGMP_NOT_MEMBER;
1895 inm_rele_locked(inmh, inm);
1896 }
1897 }
1898 break;
1899 }
1900 }
1901
1902
1903 /*
1904 * Suppress a group's pending response to a group or source/group query.
1905 *
1906 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1907 * Do NOT update ST1/ST0 as this operation merely suppresses
1908 * the currently pending group record.
1909 * Do NOT suppress the response to a general query. It is possible but
1910 * it would require adding another state or flag.
1911 */
1912 static void
1913 igmp_v3_suppress_group_record(struct in_multi *inm)
1914 {
1915
1916 IN_MULTI_LIST_LOCK_ASSERT();
1917
1918 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1919 ("%s: not IGMPv3 mode on link", __func__));
1920
1921 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER ||
1922 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1923 return;
1924
1925 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1926 inm_clear_recorded(inm);
1927
1928 inm->inm_timer = 0;
1929 inm->inm_state = IGMP_REPORTING_MEMBER;
1930 }
1931
1932 /*
1933 * Switch to a different IGMP version on the given interface,
1934 * as per Section 7.2.1.
1935 */
1936 static void
1937 igmp_set_version(struct igmp_ifsoftc *igi, const int version)
1938 {
1939 int old_version_timer;
1940
1941 IGMP_LOCK_ASSERT();
1942
1943 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1944 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1945
1946 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1947 /*
1948 * Compute the "Older Version Querier Present" timer as per
1949 * Section 8.12.
1950 */
1951 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1952 old_version_timer *= PR_SLOWHZ;
1953
1954 if (version == IGMP_VERSION_1) {
1955 igi->igi_v1_timer = old_version_timer;
1956 igi->igi_v2_timer = 0;
1957 } else if (version == IGMP_VERSION_2) {
1958 igi->igi_v1_timer = 0;
1959 igi->igi_v2_timer = old_version_timer;
1960 }
1961 }
1962
1963 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1964 if (igi->igi_version != IGMP_VERSION_2) {
1965 igi->igi_version = IGMP_VERSION_2;
1966 igmp_v3_cancel_link_timers(igi);
1967 }
1968 } else if (igi->igi_v1_timer > 0) {
1969 if (igi->igi_version != IGMP_VERSION_1) {
1970 igi->igi_version = IGMP_VERSION_1;
1971 igmp_v3_cancel_link_timers(igi);
1972 }
1973 }
1974 }
1975
1976 /*
1977 * Cancel pending IGMPv3 timers for the given link and all groups
1978 * joined on it; state-change, general-query, and group-query timers.
1979 *
1980 * Only ever called on a transition from v3 to Compatibility mode. Kill
1981 * the timers stone dead (this may be expensive for large N groups), they
1982 * will be restarted if Compatibility Mode deems that they must be due to
1983 * query processing.
1984 */
1985 static void
1986 igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
1987 {
1988 struct ifmultiaddr *ifma;
1989 struct ifnet *ifp;
1990 struct in_multi *inm;
1991 struct in_multi_head inm_free_tmp;
1992
1993 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
1994 igi->igi_ifp, igi->igi_ifp->if_xname);
1995
1996 IN_MULTI_LIST_LOCK_ASSERT();
1997 IGMP_LOCK_ASSERT();
1998 SLIST_INIT(&inm_free_tmp);
1999
2000 /*
2001 * Stop the v3 General Query Response on this link stone dead.
2002 * If fasttimo is woken up due to V_interface_timers_running,
2003 * the flag will be cleared if there are no pending link timers.
2004 */
2005 igi->igi_v3_timer = 0;
2006
2007 /*
2008 * Now clear the current-state and state-change report timers
2009 * for all memberships scoped to this link.
2010 */
2011 ifp = igi->igi_ifp;
2012 IF_ADDR_RLOCK(ifp);
2013 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2014 if (ifma->ifma_addr->sa_family != AF_INET ||
2015 ifma->ifma_protospec == NULL)
2016 continue;
2017 inm = (struct in_multi *)ifma->ifma_protospec;
2018 switch (inm->inm_state) {
2019 case IGMP_NOT_MEMBER:
2020 case IGMP_SILENT_MEMBER:
2021 case IGMP_IDLE_MEMBER:
2022 case IGMP_LAZY_MEMBER:
2023 case IGMP_SLEEPING_MEMBER:
2024 case IGMP_AWAKENING_MEMBER:
2025 /*
2026 * These states are either not relevant in v3 mode,
2027 * or are unreported. Do nothing.
2028 */
2029 break;
2030 case IGMP_LEAVING_MEMBER:
2031 /*
2032 * If we are leaving the group and switching to
2033 * compatibility mode, we need to release the final
2034 * reference held for issuing the INCLUDE {}, and
2035 * transition to REPORTING to ensure the host leave
2036 * message is sent upstream to the old querier --
2037 * transition to NOT would lose the leave and race.
2038 */
2039 inm_rele_locked(&inm_free_tmp, inm);
2040 /* FALLTHROUGH */
2041 case IGMP_G_QUERY_PENDING_MEMBER:
2042 case IGMP_SG_QUERY_PENDING_MEMBER:
2043 inm_clear_recorded(inm);
2044 /* FALLTHROUGH */
2045 case IGMP_REPORTING_MEMBER:
2046 inm->inm_state = IGMP_REPORTING_MEMBER;
2047 break;
2048 }
2049 /*
2050 * Always clear state-change and group report timers.
2051 * Free any pending IGMPv3 state-change records.
2052 */
2053 inm->inm_sctimer = 0;
2054 inm->inm_timer = 0;
2055 mbufq_drain(&inm->inm_scq);
2056 }
2057 IF_ADDR_RUNLOCK(ifp);
2058
2059 inm_release_list_deferred(&inm_free_tmp);
2060 }
2061
2062 /*
2063 * Update the Older Version Querier Present timers for a link.
2064 * See Section 7.2.1 of RFC 3376.
2065 */
2066 static void
2067 igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
2068 {
2069
2070 IGMP_LOCK_ASSERT();
2071
2072 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2073 /*
2074 * IGMPv1 and IGMPv2 Querier Present timers expired.
2075 *
2076 * Revert to IGMPv3.
2077 */
2078 if (igi->igi_version != IGMP_VERSION_3) {
2079 CTR5(KTR_IGMPV3,
2080 "%s: transition from v%d -> v%d on %p(%s)",
2081 __func__, igi->igi_version, IGMP_VERSION_3,
2082 igi->igi_ifp, igi->igi_ifp->if_xname);
2083 igi->igi_version = IGMP_VERSION_3;
2084 }
2085 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2086 /*
2087 * IGMPv1 Querier Present timer expired,
2088 * IGMPv2 Querier Present timer running.
2089 * If IGMPv2 was disabled since last timeout,
2090 * revert to IGMPv3.
2091 * If IGMPv2 is enabled, revert to IGMPv2.
2092 */
2093 if (!V_igmp_v2enable) {
2094 CTR5(KTR_IGMPV3,
2095 "%s: transition from v%d -> v%d on %p(%s)",
2096 __func__, igi->igi_version, IGMP_VERSION_3,
2097 igi->igi_ifp, igi->igi_ifp->if_xname);
2098 igi->igi_v2_timer = 0;
2099 igi->igi_version = IGMP_VERSION_3;
2100 } else {
2101 --igi->igi_v2_timer;
2102 if (igi->igi_version != IGMP_VERSION_2) {
2103 CTR5(KTR_IGMPV3,
2104 "%s: transition from v%d -> v%d on %p(%s)",
2105 __func__, igi->igi_version, IGMP_VERSION_2,
2106 igi->igi_ifp, igi->igi_ifp->if_xname);
2107 igi->igi_version = IGMP_VERSION_2;
2108 igmp_v3_cancel_link_timers(igi);
2109 }
2110 }
2111 } else if (igi->igi_v1_timer > 0) {
2112 /*
2113 * IGMPv1 Querier Present timer running.
2114 * Stop IGMPv2 timer if running.
2115 *
2116 * If IGMPv1 was disabled since last timeout,
2117 * revert to IGMPv3.
2118 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2119 */
2120 if (!V_igmp_v1enable) {
2121 CTR5(KTR_IGMPV3,
2122 "%s: transition from v%d -> v%d on %p(%s)",
2123 __func__, igi->igi_version, IGMP_VERSION_3,
2124 igi->igi_ifp, igi->igi_ifp->if_xname);
2125 igi->igi_v1_timer = 0;
2126 igi->igi_version = IGMP_VERSION_3;
2127 } else {
2128 --igi->igi_v1_timer;
2129 }
2130 if (igi->igi_v2_timer > 0) {
2131 CTR3(KTR_IGMPV3,
2132 "%s: cancel v2 timer on %p(%s)",
2133 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2134 igi->igi_v2_timer = 0;
2135 }
2136 }
2137 }
2138
2139 /*
2140 * Global slowtimo handler.
2141 * VIMAGE: Timeout handlers are expected to service all vimages.
2142 */
2143 void
2144 igmp_slowtimo(void)
2145 {
2146 VNET_ITERATOR_DECL(vnet_iter);
2147
2148 VNET_LIST_RLOCK_NOSLEEP();
2149 VNET_FOREACH(vnet_iter) {
2150 CURVNET_SET(vnet_iter);
2151 igmp_slowtimo_vnet();
2152 CURVNET_RESTORE();
2153 }
2154 VNET_LIST_RUNLOCK_NOSLEEP();
2155 }
2156
2157 /*
2158 * Per-vnet slowtimo handler.
2159 */
2160 static void
2161 igmp_slowtimo_vnet(void)
2162 {
2163 struct igmp_ifsoftc *igi;
2164
2165 IGMP_LOCK();
2166
2167 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2168 igmp_v1v2_process_querier_timers(igi);
2169 }
2170
2171 IGMP_UNLOCK();
2172 }
2173
2174 /*
2175 * Dispatch an IGMPv1/v2 host report or leave message.
2176 * These are always small enough to fit inside a single mbuf.
2177 */
2178 static int
2179 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2180 {
2181 struct ifnet *ifp;
2182 struct igmp *igmp;
2183 struct ip *ip;
2184 struct mbuf *m;
2185
2186 IN_MULTI_LIST_LOCK_ASSERT();
2187 IGMP_LOCK_ASSERT();
2188
2189 ifp = inm->inm_ifp;
2190
2191 m = m_gethdr(M_NOWAIT, MT_DATA);
2192 if (m == NULL)
2193 return (ENOMEM);
2194 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2195
2196 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2197
2198 m->m_data += sizeof(struct ip);
2199 m->m_len = sizeof(struct igmp);
2200
2201 igmp = mtod(m, struct igmp *);
2202 igmp->igmp_type = type;
2203 igmp->igmp_code = 0;
2204 igmp->igmp_group = inm->inm_addr;
2205 igmp->igmp_cksum = 0;
2206 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2207
2208 m->m_data -= sizeof(struct ip);
2209 m->m_len += sizeof(struct ip);
2210
2211 ip = mtod(m, struct ip *);
2212 ip->ip_tos = 0;
2213 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2214 ip->ip_off = 0;
2215 ip->ip_p = IPPROTO_IGMP;
2216 ip->ip_src.s_addr = INADDR_ANY;
2217
2218 if (type == IGMP_HOST_LEAVE_MESSAGE)
2219 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2220 else
2221 ip->ip_dst = inm->inm_addr;
2222
2223 igmp_save_context(m, ifp);
2224
2225 m->m_flags |= M_IGMPV2;
2226 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2227 m->m_flags |= M_IGMP_LOOP;
2228
2229 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2230 netisr_dispatch(NETISR_IGMP, m);
2231
2232 return (0);
2233 }
2234
2235 /*
2236 * Process a state change from the upper layer for the given IPv4 group.
2237 *
2238 * Each socket holds a reference on the in_multi in its own ip_moptions.
2239 * The socket layer will have made the necessary updates to.the group
2240 * state, it is now up to IGMP to issue a state change report if there
2241 * has been any change between T0 (when the last state-change was issued)
2242 * and T1 (now).
2243 *
2244 * We use the IGMPv3 state machine at group level. The IGMP module
2245 * however makes the decision as to which IGMP protocol version to speak.
2246 * A state change *from* INCLUDE {} always means an initial join.
2247 * A state change *to* INCLUDE {} always means a final leave.
2248 *
2249 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2250 * save ourselves a bunch of work; any exclusive mode groups need not
2251 * compute source filter lists.
2252 *
2253 * VIMAGE: curvnet should have been set by caller, as this routine
2254 * is called from the socket option handlers.
2255 */
2256 int
2257 igmp_change_state(struct in_multi *inm)
2258 {
2259 struct igmp_ifsoftc *igi;
2260 struct ifnet *ifp;
2261 int error;
2262
2263 error = 0;
2264 IN_MULTI_LOCK_ASSERT();
2265 /*
2266 * Try to detect if the upper layer just asked us to change state
2267 * for an interface which has now gone away.
2268 */
2269 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2270 ifp = inm->inm_ifma->ifma_ifp;
2271 /*
2272 * Sanity check that netinet's notion of ifp is the
2273 * same as net's.
2274 */
2275 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2276
2277 IGMP_LOCK();
2278
2279 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2280 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2281
2282 /*
2283 * If we detect a state transition to or from MCAST_UNDEFINED
2284 * for this group, then we are starting or finishing an IGMP
2285 * life cycle for this group.
2286 */
2287 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2288 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2289 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2290 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2291 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2292 error = igmp_initial_join(inm, igi);
2293 goto out_locked;
2294 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2295 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2296 igmp_final_leave(inm, igi);
2297 goto out_locked;
2298 }
2299 } else {
2300 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2301 }
2302
2303 error = igmp_handle_state_change(inm, igi);
2304
2305 out_locked:
2306 IGMP_UNLOCK();
2307 return (error);
2308 }
2309
2310 /*
2311 * Perform the initial join for an IGMP group.
2312 *
2313 * When joining a group:
2314 * If the group should have its IGMP traffic suppressed, do nothing.
2315 * IGMPv1 starts sending IGMPv1 host membership reports.
2316 * IGMPv2 starts sending IGMPv2 host membership reports.
2317 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2318 * initial state of the membership.
2319 */
2320 static int
2321 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2322 {
2323 struct ifnet *ifp;
2324 struct mbufq *mq;
2325 int error, retval, syncstates;
2326
2327 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2328 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2329
2330 error = 0;
2331 syncstates = 1;
2332
2333 ifp = inm->inm_ifp;
2334
2335 IN_MULTI_LOCK_ASSERT();
2336 IGMP_LOCK_ASSERT();
2337
2338 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2339
2340 /*
2341 * Groups joined on loopback or marked as 'not reported',
2342 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2343 * are never reported in any IGMP protocol exchanges.
2344 * All other groups enter the appropriate IGMP state machine
2345 * for the version in use on this link.
2346 * A link marked as IGIF_SILENT causes IGMP to be completely
2347 * disabled for the link.
2348 */
2349 if ((ifp->if_flags & IFF_LOOPBACK) ||
2350 (igi->igi_flags & IGIF_SILENT) ||
2351 !igmp_isgroupreported(inm->inm_addr)) {
2352 CTR1(KTR_IGMPV3,
2353 "%s: not kicking state machine for silent group", __func__);
2354 inm->inm_state = IGMP_SILENT_MEMBER;
2355 inm->inm_timer = 0;
2356 } else {
2357 /*
2358 * Deal with overlapping in_multi lifecycle.
2359 * If this group was LEAVING, then make sure
2360 * we drop the reference we picked up to keep the
2361 * group around for the final INCLUDE {} enqueue.
2362 */
2363 if (igi->igi_version == IGMP_VERSION_3 &&
2364 inm->inm_state == IGMP_LEAVING_MEMBER) {
2365 MPASS(inm->inm_refcount > 1);
2366 inm_rele_locked(NULL, inm);
2367 }
2368 inm->inm_state = IGMP_REPORTING_MEMBER;
2369
2370 switch (igi->igi_version) {
2371 case IGMP_VERSION_1:
2372 case IGMP_VERSION_2:
2373 inm->inm_state = IGMP_IDLE_MEMBER;
2374 error = igmp_v1v2_queue_report(inm,
2375 (igi->igi_version == IGMP_VERSION_2) ?
2376 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2377 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2378 if (error == 0) {
2379 inm->inm_timer = IGMP_RANDOM_DELAY(
2380 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2381 V_current_state_timers_running = 1;
2382 }
2383 break;
2384
2385 case IGMP_VERSION_3:
2386 /*
2387 * Defer update of T0 to T1, until the first copy
2388 * of the state change has been transmitted.
2389 */
2390 syncstates = 0;
2391
2392 /*
2393 * Immediately enqueue a State-Change Report for
2394 * this interface, freeing any previous reports.
2395 * Don't kick the timers if there is nothing to do,
2396 * or if an error occurred.
2397 */
2398 mq = &inm->inm_scq;
2399 mbufq_drain(mq);
2400 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2401 0, 0);
2402 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2403 __func__, retval);
2404 if (retval <= 0) {
2405 error = retval * -1;
2406 break;
2407 }
2408
2409 /*
2410 * Schedule transmission of pending state-change
2411 * report up to RV times for this link. The timer
2412 * will fire at the next igmp_fasttimo (~200ms),
2413 * giving us an opportunity to merge the reports.
2414 */
2415 if (igi->igi_flags & IGIF_LOOPBACK) {
2416 inm->inm_scrv = 1;
2417 } else {
2418 KASSERT(igi->igi_rv > 1,
2419 ("%s: invalid robustness %d", __func__,
2420 igi->igi_rv));
2421 inm->inm_scrv = igi->igi_rv;
2422 }
2423 inm->inm_sctimer = 1;
2424 V_state_change_timers_running = 1;
2425
2426 error = 0;
2427 break;
2428 }
2429 }
2430
2431 /*
2432 * Only update the T0 state if state change is atomic,
2433 * i.e. we don't need to wait for a timer to fire before we
2434 * can consider the state change to have been communicated.
2435 */
2436 if (syncstates) {
2437 inm_commit(inm);
2438 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2439 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2440 }
2441
2442 return (error);
2443 }
2444
2445 /*
2446 * Issue an intermediate state change during the IGMP life-cycle.
2447 */
2448 static int
2449 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2450 {
2451 struct ifnet *ifp;
2452 int retval;
2453
2454 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2455 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2456
2457 ifp = inm->inm_ifp;
2458
2459 IN_MULTI_LIST_LOCK_ASSERT();
2460 IGMP_LOCK_ASSERT();
2461
2462 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2463
2464 if ((ifp->if_flags & IFF_LOOPBACK) ||
2465 (igi->igi_flags & IGIF_SILENT) ||
2466 !igmp_isgroupreported(inm->inm_addr) ||
2467 (igi->igi_version != IGMP_VERSION_3)) {
2468 if (!igmp_isgroupreported(inm->inm_addr)) {
2469 CTR1(KTR_IGMPV3,
2470 "%s: not kicking state machine for silent group", __func__);
2471 }
2472 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2473 inm_commit(inm);
2474 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2475 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2476 return (0);
2477 }
2478
2479 mbufq_drain(&inm->inm_scq);
2480
2481 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2482 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2483 if (retval <= 0)
2484 return (-retval);
2485
2486 /*
2487 * If record(s) were enqueued, start the state-change
2488 * report timer for this group.
2489 */
2490 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2491 inm->inm_sctimer = 1;
2492 V_state_change_timers_running = 1;
2493
2494 return (0);
2495 }
2496
2497 /*
2498 * Perform the final leave for an IGMP group.
2499 *
2500 * When leaving a group:
2501 * IGMPv1 does nothing.
2502 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2503 * IGMPv3 enqueues a state-change report containing a transition
2504 * to INCLUDE {} for immediate transmission.
2505 */
2506 static void
2507 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2508 {
2509 int syncstates;
2510
2511 syncstates = 1;
2512
2513 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2514 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2515 inm->inm_ifp->if_xname);
2516
2517 IN_MULTI_LIST_LOCK_ASSERT();
2518 IGMP_LOCK_ASSERT();
2519
2520 switch (inm->inm_state) {
2521 case IGMP_NOT_MEMBER:
2522 case IGMP_SILENT_MEMBER:
2523 case IGMP_LEAVING_MEMBER:
2524 /* Already leaving or left; do nothing. */
2525 CTR1(KTR_IGMPV3,
2526 "%s: not kicking state machine for silent group", __func__);
2527 break;
2528 case IGMP_REPORTING_MEMBER:
2529 case IGMP_IDLE_MEMBER:
2530 case IGMP_G_QUERY_PENDING_MEMBER:
2531 case IGMP_SG_QUERY_PENDING_MEMBER:
2532 if (igi->igi_version == IGMP_VERSION_2) {
2533 #ifdef INVARIANTS
2534 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2535 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2536 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2537 __func__);
2538 #endif
2539 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2540 inm->inm_state = IGMP_NOT_MEMBER;
2541 } else if (igi->igi_version == IGMP_VERSION_3) {
2542 /*
2543 * Stop group timer and all pending reports.
2544 * Immediately enqueue a state-change report
2545 * TO_IN {} to be sent on the next fast timeout,
2546 * giving us an opportunity to merge reports.
2547 */
2548 mbufq_drain(&inm->inm_scq);
2549 inm->inm_timer = 0;
2550 if (igi->igi_flags & IGIF_LOOPBACK) {
2551 inm->inm_scrv = 1;
2552 } else {
2553 inm->inm_scrv = igi->igi_rv;
2554 }
2555 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2556 "pending retransmissions.", __func__,
2557 ntohl(inm->inm_addr.s_addr),
2558 inm->inm_ifp->if_xname, inm->inm_scrv);
2559 if (inm->inm_scrv == 0) {
2560 inm->inm_state = IGMP_NOT_MEMBER;
2561 inm->inm_sctimer = 0;
2562 } else {
2563 int retval __unused;
2564
2565 inm_acquire_locked(inm);
2566
2567 retval = igmp_v3_enqueue_group_record(
2568 &inm->inm_scq, inm, 1, 0, 0);
2569 KASSERT(retval != 0,
2570 ("%s: enqueue record = %d", __func__,
2571 retval));
2572
2573 inm->inm_state = IGMP_LEAVING_MEMBER;
2574 inm->inm_sctimer = 1;
2575 V_state_change_timers_running = 1;
2576 syncstates = 0;
2577 }
2578 break;
2579 }
2580 break;
2581 case IGMP_LAZY_MEMBER:
2582 case IGMP_SLEEPING_MEMBER:
2583 case IGMP_AWAKENING_MEMBER:
2584 /* Our reports are suppressed; do nothing. */
2585 break;
2586 }
2587
2588 if (syncstates) {
2589 inm_commit(inm);
2590 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2591 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2592 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2593 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2594 __func__, ntohl(inm->inm_addr.s_addr),
2595 inm->inm_ifp->if_xname);
2596 }
2597 }
2598
2599 /*
2600 * Enqueue an IGMPv3 group record to the given output queue.
2601 *
2602 * XXX This function could do with having the allocation code
2603 * split out, and the multiple-tree-walks coalesced into a single
2604 * routine as has been done in igmp_v3_enqueue_filter_change().
2605 *
2606 * If is_state_change is zero, a current-state record is appended.
2607 * If is_state_change is non-zero, a state-change report is appended.
2608 *
2609 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2610 * If is_group_query is zero, and if there is a packet with free space
2611 * at the tail of the queue, it will be appended to providing there
2612 * is enough free space.
2613 * Otherwise a new mbuf packet chain is allocated.
2614 *
2615 * If is_source_query is non-zero, each source is checked to see if
2616 * it was recorded for a Group-Source query, and will be omitted if
2617 * it is not both in-mode and recorded.
2618 *
2619 * The function will attempt to allocate leading space in the packet
2620 * for the IP/IGMP header to be prepended without fragmenting the chain.
2621 *
2622 * If successful the size of all data appended to the queue is returned,
2623 * otherwise an error code less than zero is returned, or zero if
2624 * no record(s) were appended.
2625 */
2626 static int
2627 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2628 const int is_state_change, const int is_group_query,
2629 const int is_source_query)
2630 {
2631 struct igmp_grouprec ig;
2632 struct igmp_grouprec *pig;
2633 struct ifnet *ifp;
2634 struct ip_msource *ims, *nims;
2635 struct mbuf *m0, *m, *md;
2636 int is_filter_list_change;
2637 int minrec0len, m0srcs, msrcs, nbytes, off;
2638 int record_has_sources;
2639 int now;
2640 int type;
2641 in_addr_t naddr;
2642 uint8_t mode;
2643
2644 IN_MULTI_LIST_LOCK_ASSERT();
2645
2646 ifp = inm->inm_ifp;
2647 is_filter_list_change = 0;
2648 m = NULL;
2649 m0 = NULL;
2650 m0srcs = 0;
2651 msrcs = 0;
2652 nbytes = 0;
2653 nims = NULL;
2654 record_has_sources = 1;
2655 pig = NULL;
2656 type = IGMP_DO_NOTHING;
2657 mode = inm->inm_st[1].iss_fmode;
2658
2659 /*
2660 * If we did not transition out of ASM mode during t0->t1,
2661 * and there are no source nodes to process, we can skip
2662 * the generation of source records.
2663 */
2664 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2665 inm->inm_nsrc == 0)
2666 record_has_sources = 0;
2667
2668 if (is_state_change) {
2669 /*
2670 * Queue a state change record.
2671 * If the mode did not change, and there are non-ASM
2672 * listeners or source filters present,
2673 * we potentially need to issue two records for the group.
2674 * If we are transitioning to MCAST_UNDEFINED, we need
2675 * not send any sources.
2676 * If there are ASM listeners, and there was no filter
2677 * mode transition of any kind, do nothing.
2678 */
2679 if (mode != inm->inm_st[0].iss_fmode) {
2680 if (mode == MCAST_EXCLUDE) {
2681 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2682 __func__);
2683 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2684 } else {
2685 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2686 __func__);
2687 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2688 if (mode == MCAST_UNDEFINED)
2689 record_has_sources = 0;
2690 }
2691 } else {
2692 if (record_has_sources) {
2693 is_filter_list_change = 1;
2694 } else {
2695 type = IGMP_DO_NOTHING;
2696 }
2697 }
2698 } else {
2699 /*
2700 * Queue a current state record.
2701 */
2702 if (mode == MCAST_EXCLUDE) {
2703 type = IGMP_MODE_IS_EXCLUDE;
2704 } else if (mode == MCAST_INCLUDE) {
2705 type = IGMP_MODE_IS_INCLUDE;
2706 KASSERT(inm->inm_st[1].iss_asm == 0,
2707 ("%s: inm %p is INCLUDE but ASM count is %d",
2708 __func__, inm, inm->inm_st[1].iss_asm));
2709 }
2710 }
2711
2712 /*
2713 * Generate the filter list changes using a separate function.
2714 */
2715 if (is_filter_list_change)
2716 return (igmp_v3_enqueue_filter_change(mq, inm));
2717
2718 if (type == IGMP_DO_NOTHING) {
2719 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2720 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2721 return (0);
2722 }
2723
2724 /*
2725 * If any sources are present, we must be able to fit at least
2726 * one in the trailing space of the tail packet's mbuf,
2727 * ideally more.
2728 */
2729 minrec0len = sizeof(struct igmp_grouprec);
2730 if (record_has_sources)
2731 minrec0len += sizeof(in_addr_t);
2732
2733 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2734 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2735 inm->inm_ifp->if_xname);
2736
2737 /*
2738 * Check if we have a packet in the tail of the queue for this
2739 * group into which the first group record for this group will fit.
2740 * Otherwise allocate a new packet.
2741 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2742 * Note: Group records for G/GSR query responses MUST be sent
2743 * in their own packet.
2744 */
2745 m0 = mbufq_last(mq);
2746 if (!is_group_query &&
2747 m0 != NULL &&
2748 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2749 (m0->m_pkthdr.len + minrec0len) <
2750 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2751 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2752 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2753 m = m0;
2754 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2755 } else {
2756 if (mbufq_full(mq)) {
2757 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2758 return (-ENOMEM);
2759 }
2760 m = NULL;
2761 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2762 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2763 if (!is_state_change && !is_group_query) {
2764 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2765 if (m)
2766 m->m_data += IGMP_LEADINGSPACE;
2767 }
2768 if (m == NULL) {
2769 m = m_gethdr(M_NOWAIT, MT_DATA);
2770 if (m)
2771 M_ALIGN(m, IGMP_LEADINGSPACE);
2772 }
2773 if (m == NULL)
2774 return (-ENOMEM);
2775
2776 igmp_save_context(m, ifp);
2777
2778 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2779 }
2780
2781 /*
2782 * Append group record.
2783 * If we have sources, we don't know how many yet.
2784 */
2785 ig.ig_type = type;
2786 ig.ig_datalen = 0;
2787 ig.ig_numsrc = 0;
2788 ig.ig_group = inm->inm_addr;
2789 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2790 if (m != m0)
2791 m_freem(m);
2792 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2793 return (-ENOMEM);
2794 }
2795 nbytes += sizeof(struct igmp_grouprec);
2796
2797 /*
2798 * Append as many sources as will fit in the first packet.
2799 * If we are appending to a new packet, the chain allocation
2800 * may potentially use clusters; use m_getptr() in this case.
2801 * If we are appending to an existing packet, we need to obtain
2802 * a pointer to the group record after m_append(), in case a new
2803 * mbuf was allocated.
2804 * Only append sources which are in-mode at t1. If we are
2805 * transitioning to MCAST_UNDEFINED state on the group, do not
2806 * include source entries.
2807 * Only report recorded sources in our filter set when responding
2808 * to a group-source query.
2809 */
2810 if (record_has_sources) {
2811 if (m == m0) {
2812 md = m_last(m);
2813 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2814 md->m_len - nbytes);
2815 } else {
2816 md = m_getptr(m, 0, &off);
2817 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2818 off);
2819 }
2820 msrcs = 0;
2821 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2822 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2823 ims->ims_haddr);
2824 now = ims_get_mode(inm, ims, 1);
2825 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2826 if ((now != mode) ||
2827 (now == mode && mode == MCAST_UNDEFINED)) {
2828 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2829 continue;
2830 }
2831 if (is_source_query && ims->ims_stp == 0) {
2832 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2833 __func__);
2834 continue;
2835 }
2836 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2837 naddr = htonl(ims->ims_haddr);
2838 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2839 if (m != m0)
2840 m_freem(m);
2841 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2842 __func__);
2843 return (-ENOMEM);
2844 }
2845 nbytes += sizeof(in_addr_t);
2846 ++msrcs;
2847 if (msrcs == m0srcs)
2848 break;
2849 }
2850 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2851 msrcs);
2852 pig->ig_numsrc = htons(msrcs);
2853 nbytes += (msrcs * sizeof(in_addr_t));
2854 }
2855
2856 if (is_source_query && msrcs == 0) {
2857 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2858 if (m != m0)
2859 m_freem(m);
2860 return (0);
2861 }
2862
2863 /*
2864 * We are good to go with first packet.
2865 */
2866 if (m != m0) {
2867 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2868 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2869 mbufq_enqueue(mq, m);
2870 } else
2871 m->m_pkthdr.PH_vt.vt_nrecs++;
2872
2873 /*
2874 * No further work needed if no source list in packet(s).
2875 */
2876 if (!record_has_sources)
2877 return (nbytes);
2878
2879 /*
2880 * Whilst sources remain to be announced, we need to allocate
2881 * a new packet and fill out as many sources as will fit.
2882 * Always try for a cluster first.
2883 */
2884 while (nims != NULL) {
2885 if (mbufq_full(mq)) {
2886 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2887 return (-ENOMEM);
2888 }
2889 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2890 if (m)
2891 m->m_data += IGMP_LEADINGSPACE;
2892 if (m == NULL) {
2893 m = m_gethdr(M_NOWAIT, MT_DATA);
2894 if (m)
2895 M_ALIGN(m, IGMP_LEADINGSPACE);
2896 }
2897 if (m == NULL)
2898 return (-ENOMEM);
2899 igmp_save_context(m, ifp);
2900 md = m_getptr(m, 0, &off);
2901 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2902 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2903
2904 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2905 if (m != m0)
2906 m_freem(m);
2907 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2908 return (-ENOMEM);
2909 }
2910 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2911 nbytes += sizeof(struct igmp_grouprec);
2912
2913 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2914 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2915
2916 msrcs = 0;
2917 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2918 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2919 ims->ims_haddr);
2920 now = ims_get_mode(inm, ims, 1);
2921 if ((now != mode) ||
2922 (now == mode && mode == MCAST_UNDEFINED)) {
2923 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2924 continue;
2925 }
2926 if (is_source_query && ims->ims_stp == 0) {
2927 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2928 __func__);
2929 continue;
2930 }
2931 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2932 naddr = htonl(ims->ims_haddr);
2933 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2934 if (m != m0)
2935 m_freem(m);
2936 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2937 __func__);
2938 return (-ENOMEM);
2939 }
2940 ++msrcs;
2941 if (msrcs == m0srcs)
2942 break;
2943 }
2944 pig->ig_numsrc = htons(msrcs);
2945 nbytes += (msrcs * sizeof(in_addr_t));
2946
2947 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2948 mbufq_enqueue(mq, m);
2949 }
2950
2951 return (nbytes);
2952 }
2953
2954 /*
2955 * Type used to mark record pass completion.
2956 * We exploit the fact we can cast to this easily from the
2957 * current filter modes on each ip_msource node.
2958 */
2959 typedef enum {
2960 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2961 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2962 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2963 REC_FULL = REC_ALLOW | REC_BLOCK
2964 } rectype_t;
2965
2966 /*
2967 * Enqueue an IGMPv3 filter list change to the given output queue.
2968 *
2969 * Source list filter state is held in an RB-tree. When the filter list
2970 * for a group is changed without changing its mode, we need to compute
2971 * the deltas between T0 and T1 for each source in the filter set,
2972 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2973 *
2974 * As we may potentially queue two record types, and the entire R-B tree
2975 * needs to be walked at once, we break this out into its own function
2976 * so we can generate a tightly packed queue of packets.
2977 *
2978 * XXX This could be written to only use one tree walk, although that makes
2979 * serializing into the mbuf chains a bit harder. For now we do two walks
2980 * which makes things easier on us, and it may or may not be harder on
2981 * the L2 cache.
2982 *
2983 * If successful the size of all data appended to the queue is returned,
2984 * otherwise an error code less than zero is returned, or zero if
2985 * no record(s) were appended.
2986 */
2987 static int
2988 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
2989 {
2990 static const int MINRECLEN =
2991 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
2992 struct ifnet *ifp;
2993 struct igmp_grouprec ig;
2994 struct igmp_grouprec *pig;
2995 struct ip_msource *ims, *nims;
2996 struct mbuf *m, *m0, *md;
2997 in_addr_t naddr;
2998 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2999 int nallow, nblock;
3000 uint8_t mode, now, then;
3001 rectype_t crt, drt, nrt;
3002
3003 IN_MULTI_LIST_LOCK_ASSERT();
3004
3005 if (inm->inm_nsrc == 0 ||
3006 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3007 return (0);
3008
3009 ifp = inm->inm_ifp; /* interface */
3010 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3011 crt = REC_NONE; /* current group record type */
3012 drt = REC_NONE; /* mask of completed group record types */
3013 nrt = REC_NONE; /* record type for current node */
3014 m0srcs = 0; /* # source which will fit in current mbuf chain */
3015 nbytes = 0; /* # of bytes appended to group's state-change queue */
3016 npbytes = 0; /* # of bytes appended this packet */
3017 rsrcs = 0; /* # sources encoded in current record */
3018 schanged = 0; /* # nodes encoded in overall filter change */
3019 nallow = 0; /* # of source entries in ALLOW_NEW */
3020 nblock = 0; /* # of source entries in BLOCK_OLD */
3021 nims = NULL; /* next tree node pointer */
3022
3023 /*
3024 * For each possible filter record mode.
3025 * The first kind of source we encounter tells us which
3026 * is the first kind of record we start appending.
3027 * If a node transitioned to UNDEFINED at t1, its mode is treated
3028 * as the inverse of the group's filter mode.
3029 */
3030 while (drt != REC_FULL) {
3031 do {
3032 m0 = mbufq_last(mq);
3033 if (m0 != NULL &&
3034 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3035 IGMP_V3_REPORT_MAXRECS) &&
3036 (m0->m_pkthdr.len + MINRECLEN) <
3037 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3038 m = m0;
3039 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3040 sizeof(struct igmp_grouprec)) /
3041 sizeof(in_addr_t);
3042 CTR1(KTR_IGMPV3,
3043 "%s: use previous packet", __func__);
3044 } else {
3045 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3046 if (m)
3047 m->m_data += IGMP_LEADINGSPACE;
3048 if (m == NULL) {
3049 m = m_gethdr(M_NOWAIT, MT_DATA);
3050 if (m)
3051 M_ALIGN(m, IGMP_LEADINGSPACE);
3052 }
3053 if (m == NULL) {
3054 CTR1(KTR_IGMPV3,
3055 "%s: m_get*() failed", __func__);
3056 return (-ENOMEM);
3057 }
3058 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3059 igmp_save_context(m, ifp);
3060 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3061 sizeof(struct igmp_grouprec)) /
3062 sizeof(in_addr_t);
3063 npbytes = 0;
3064 CTR1(KTR_IGMPV3,
3065 "%s: allocated new packet", __func__);
3066 }
3067 /*
3068 * Append the IGMP group record header to the
3069 * current packet's data area.
3070 * Recalculate pointer to free space for next
3071 * group record, in case m_append() allocated
3072 * a new mbuf or cluster.
3073 */
3074 memset(&ig, 0, sizeof(ig));
3075 ig.ig_group = inm->inm_addr;
3076 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3077 if (m != m0)
3078 m_freem(m);
3079 CTR1(KTR_IGMPV3,
3080 "%s: m_append() failed", __func__);
3081 return (-ENOMEM);
3082 }
3083 npbytes += sizeof(struct igmp_grouprec);
3084 if (m != m0) {
3085 /* new packet; offset in c hain */
3086 md = m_getptr(m, npbytes -
3087 sizeof(struct igmp_grouprec), &off);
3088 pig = (struct igmp_grouprec *)(mtod(md,
3089 uint8_t *) + off);
3090 } else {
3091 /* current packet; offset from last append */
3092 md = m_last(m);
3093 pig = (struct igmp_grouprec *)(mtod(md,
3094 uint8_t *) + md->m_len -
3095 sizeof(struct igmp_grouprec));
3096 }
3097 /*
3098 * Begin walking the tree for this record type
3099 * pass, or continue from where we left off
3100 * previously if we had to allocate a new packet.
3101 * Only report deltas in-mode at t1.
3102 * We need not report included sources as allowed
3103 * if we are in inclusive mode on the group,
3104 * however the converse is not true.
3105 */
3106 rsrcs = 0;
3107 if (nims == NULL)
3108 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3109 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3110 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3111 __func__, ims->ims_haddr);
3112 now = ims_get_mode(inm, ims, 1);
3113 then = ims_get_mode(inm, ims, 0);
3114 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3115 __func__, then, now);
3116 if (now == then) {
3117 CTR1(KTR_IGMPV3,
3118 "%s: skip unchanged", __func__);
3119 continue;
3120 }
3121 if (mode == MCAST_EXCLUDE &&
3122 now == MCAST_INCLUDE) {
3123 CTR1(KTR_IGMPV3,
3124 "%s: skip IN src on EX group",
3125 __func__);
3126 continue;
3127 }
3128 nrt = (rectype_t)now;
3129 if (nrt == REC_NONE)
3130 nrt = (rectype_t)(~mode & REC_FULL);
3131 if (schanged++ == 0) {
3132 crt = nrt;
3133 } else if (crt != nrt)
3134 continue;
3135 naddr = htonl(ims->ims_haddr);
3136 if (!m_append(m, sizeof(in_addr_t),
3137 (void *)&naddr)) {
3138 if (m != m0)
3139 m_freem(m);
3140 CTR1(KTR_IGMPV3,
3141 "%s: m_append() failed", __func__);
3142 return (-ENOMEM);
3143 }
3144 nallow += !!(crt == REC_ALLOW);
3145 nblock += !!(crt == REC_BLOCK);
3146 if (++rsrcs == m0srcs)
3147 break;
3148 }
3149 /*
3150 * If we did not append any tree nodes on this
3151 * pass, back out of allocations.
3152 */
3153 if (rsrcs == 0) {
3154 npbytes -= sizeof(struct igmp_grouprec);
3155 if (m != m0) {
3156 CTR1(KTR_IGMPV3,
3157 "%s: m_free(m)", __func__);
3158 m_freem(m);
3159 } else {
3160 CTR1(KTR_IGMPV3,
3161 "%s: m_adj(m, -ig)", __func__);
3162 m_adj(m, -((int)sizeof(
3163 struct igmp_grouprec)));
3164 }
3165 continue;
3166 }
3167 npbytes += (rsrcs * sizeof(in_addr_t));
3168 if (crt == REC_ALLOW)
3169 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3170 else if (crt == REC_BLOCK)
3171 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3172 pig->ig_numsrc = htons(rsrcs);
3173 /*
3174 * Count the new group record, and enqueue this
3175 * packet if it wasn't already queued.
3176 */
3177 m->m_pkthdr.PH_vt.vt_nrecs++;
3178 if (m != m0)
3179 mbufq_enqueue(mq, m);
3180 nbytes += npbytes;
3181 } while (nims != NULL);
3182 drt |= crt;
3183 crt = (~crt & REC_FULL);
3184 }
3185
3186 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3187 nallow, nblock);
3188
3189 return (nbytes);
3190 }
3191
3192 static int
3193 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3194 {
3195 struct mbufq *gq;
3196 struct mbuf *m; /* pending state-change */
3197 struct mbuf *m0; /* copy of pending state-change */
3198 struct mbuf *mt; /* last state-change in packet */
3199 int docopy, domerge;
3200 u_int recslen;
3201
3202 docopy = 0;
3203 domerge = 0;
3204 recslen = 0;
3205
3206 IN_MULTI_LIST_LOCK_ASSERT();
3207 IGMP_LOCK_ASSERT();
3208
3209 /*
3210 * If there are further pending retransmissions, make a writable
3211 * copy of each queued state-change message before merging.
3212 */
3213 if (inm->inm_scrv > 0)
3214 docopy = 1;
3215
3216 gq = &inm->inm_scq;
3217 #ifdef KTR
3218 if (mbufq_first(gq) == NULL) {
3219 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3220 __func__, inm);
3221 }
3222 #endif
3223
3224 m = mbufq_first(gq);
3225 while (m != NULL) {
3226 /*
3227 * Only merge the report into the current packet if
3228 * there is sufficient space to do so; an IGMPv3 report
3229 * packet may only contain 65,535 group records.
3230 * Always use a simple mbuf chain concatentation to do this,
3231 * as large state changes for single groups may have
3232 * allocated clusters.
3233 */
3234 domerge = 0;
3235 mt = mbufq_last(scq);
3236 if (mt != NULL) {
3237 recslen = m_length(m, NULL);
3238
3239 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3240 m->m_pkthdr.PH_vt.vt_nrecs <=
3241 IGMP_V3_REPORT_MAXRECS) &&
3242 (mt->m_pkthdr.len + recslen <=
3243 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3244 domerge = 1;
3245 }
3246
3247 if (!domerge && mbufq_full(gq)) {
3248 CTR2(KTR_IGMPV3,
3249 "%s: outbound queue full, skipping whole packet %p",
3250 __func__, m);
3251 mt = m->m_nextpkt;
3252 if (!docopy)
3253 m_freem(m);
3254 m = mt;
3255 continue;
3256 }
3257
3258 if (!docopy) {
3259 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3260 m0 = mbufq_dequeue(gq);
3261 m = m0->m_nextpkt;
3262 } else {
3263 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3264 m0 = m_dup(m, M_NOWAIT);
3265 if (m0 == NULL)
3266 return (ENOMEM);
3267 m0->m_nextpkt = NULL;
3268 m = m->m_nextpkt;
3269 }
3270
3271 if (!domerge) {
3272 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p)",
3273 __func__, m0, scq);
3274 mbufq_enqueue(scq, m0);
3275 } else {
3276 struct mbuf *mtl; /* last mbuf of packet mt */
3277
3278 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p)",
3279 __func__, m0, mt);
3280
3281 mtl = m_last(mt);
3282 m0->m_flags &= ~M_PKTHDR;
3283 mt->m_pkthdr.len += recslen;
3284 mt->m_pkthdr.PH_vt.vt_nrecs +=
3285 m0->m_pkthdr.PH_vt.vt_nrecs;
3286
3287 mtl->m_next = m0;
3288 }
3289 }
3290
3291 return (0);
3292 }
3293
3294 /*
3295 * Respond to a pending IGMPv3 General Query.
3296 */
3297 static void
3298 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3299 {
3300 struct ifmultiaddr *ifma;
3301 struct ifnet *ifp;
3302 struct in_multi *inm;
3303 int retval __unused, loop;
3304
3305 IN_MULTI_LIST_LOCK_ASSERT();
3306 IGMP_LOCK_ASSERT();
3307
3308 KASSERT(igi->igi_version == IGMP_VERSION_3,
3309 ("%s: called when version %d", __func__, igi->igi_version));
3310
3311 /*
3312 * Check that there are some packets queued. If so, send them first.
3313 * For large number of groups the reply to general query can take
3314 * many packets, we should finish sending them before starting of
3315 * queuing the new reply.
3316 */
3317 if (mbufq_len(&igi->igi_gq) != 0)
3318 goto send;
3319
3320 ifp = igi->igi_ifp;
3321
3322 IF_ADDR_RLOCK(ifp);
3323 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3324 if (ifma->ifma_addr->sa_family != AF_INET ||
3325 ifma->ifma_protospec == NULL)
3326 continue;
3327
3328 inm = (struct in_multi *)ifma->ifma_protospec;
3329 KASSERT(ifp == inm->inm_ifp,
3330 ("%s: inconsistent ifp", __func__));
3331
3332 switch (inm->inm_state) {
3333 case IGMP_NOT_MEMBER:
3334 case IGMP_SILENT_MEMBER:
3335 break;
3336 case IGMP_REPORTING_MEMBER:
3337 case IGMP_IDLE_MEMBER:
3338 case IGMP_LAZY_MEMBER:
3339 case IGMP_SLEEPING_MEMBER:
3340 case IGMP_AWAKENING_MEMBER:
3341 inm->inm_state = IGMP_REPORTING_MEMBER;
3342 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3343 inm, 0, 0, 0);
3344 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3345 __func__, retval);
3346 break;
3347 case IGMP_G_QUERY_PENDING_MEMBER:
3348 case IGMP_SG_QUERY_PENDING_MEMBER:
3349 case IGMP_LEAVING_MEMBER:
3350 break;
3351 }
3352 }
3353 IF_ADDR_RUNLOCK(ifp);
3354
3355 send:
3356 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3357 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3358
3359 /*
3360 * Slew transmission of bursts over 500ms intervals.
3361 */
3362 if (mbufq_first(&igi->igi_gq) != NULL) {
3363 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3364 IGMP_RESPONSE_BURST_INTERVAL);
3365 V_interface_timers_running = 1;
3366 }
3367 }
3368
3369 /*
3370 * Transmit the next pending IGMP message in the output queue.
3371 *
3372 * We get called from netisr_processqueue(). A mutex private to igmpoq
3373 * will be acquired and released around this routine.
3374 *
3375 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3376 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3377 * a link and uses a link-scope multicast address.
3378 */
3379 static void
3380 igmp_intr(struct mbuf *m)
3381 {
3382 struct ip_moptions imo;
3383 struct ifnet *ifp;
3384 struct mbuf *ipopts, *m0;
3385 int error;
3386 uint32_t ifindex;
3387
3388 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3389
3390 /*
3391 * Set VNET image pointer from enqueued mbuf chain
3392 * before doing anything else. Whilst we use interface
3393 * indexes to guard against interface detach, they are
3394 * unique to each VIMAGE and must be retrieved.
3395 */
3396 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3397 ifindex = igmp_restore_context(m);
3398
3399 /*
3400 * Check if the ifnet still exists. This limits the scope of
3401 * any race in the absence of a global ifp lock for low cost
3402 * (an array lookup).
3403 */
3404 ifp = ifnet_byindex(ifindex);
3405 if (ifp == NULL) {
3406 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3407 __func__, m, ifindex);
3408 m_freem(m);
3409 IPSTAT_INC(ips_noroute);
3410 goto out;
3411 }
3412
3413 ipopts = V_igmp_sendra ? m_raopt : NULL;
3414
3415 imo.imo_multicast_ttl = 1;
3416 imo.imo_multicast_vif = -1;
3417 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3418
3419 /*
3420 * If the user requested that IGMP traffic be explicitly
3421 * redirected to the loopback interface (e.g. they are running a
3422 * MANET interface and the routing protocol needs to see the
3423 * updates), handle this now.
3424 */
3425 if (m->m_flags & M_IGMP_LOOP)
3426 imo.imo_multicast_ifp = V_loif;
3427 else
3428 imo.imo_multicast_ifp = ifp;
3429
3430 if (m->m_flags & M_IGMPV2) {
3431 m0 = m;
3432 } else {
3433 m0 = igmp_v3_encap_report(ifp, m);
3434 if (m0 == NULL) {
3435 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3436 m_freem(m);
3437 IPSTAT_INC(ips_odropped);
3438 goto out;
3439 }
3440 }
3441
3442 igmp_scrub_context(m0);
3443 m_clrprotoflags(m);
3444 m0->m_pkthdr.rcvif = V_loif;
3445 #ifdef MAC
3446 mac_netinet_igmp_send(ifp, m0);
3447 #endif
3448 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3449 if (error) {
3450 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3451 goto out;
3452 }
3453
3454 IGMPSTAT_INC(igps_snd_reports);
3455
3456 out:
3457 /*
3458 * We must restore the existing vnet pointer before
3459 * continuing as we are run from netisr context.
3460 */
3461 CURVNET_RESTORE();
3462 }
3463
3464 /*
3465 * Encapsulate an IGMPv3 report.
3466 *
3467 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3468 * chain has already had its IP/IGMPv3 header prepended. In this case
3469 * the function will not attempt to prepend; the lengths and checksums
3470 * will however be re-computed.
3471 *
3472 * Returns a pointer to the new mbuf chain head, or NULL if the
3473 * allocation failed.
3474 */
3475 static struct mbuf *
3476 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3477 {
3478 struct rm_priotracker in_ifa_tracker;
3479 struct igmp_report *igmp;
3480 struct ip *ip;
3481 int hdrlen, igmpreclen;
3482
3483 KASSERT((m->m_flags & M_PKTHDR),
3484 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3485
3486 igmpreclen = m_length(m, NULL);
3487 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3488
3489 if (m->m_flags & M_IGMPV3_HDR) {
3490 igmpreclen -= hdrlen;
3491 } else {
3492 M_PREPEND(m, hdrlen, M_NOWAIT);
3493 if (m == NULL)
3494 return (NULL);
3495 m->m_flags |= M_IGMPV3_HDR;
3496 }
3497
3498 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3499
3500 m->m_data += sizeof(struct ip);
3501 m->m_len -= sizeof(struct ip);
3502
3503 igmp = mtod(m, struct igmp_report *);
3504 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3505 igmp->ir_rsv1 = 0;
3506 igmp->ir_rsv2 = 0;
3507 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3508 igmp->ir_cksum = 0;
3509 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3510 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3511
3512 m->m_data -= sizeof(struct ip);
3513 m->m_len += sizeof(struct ip);
3514
3515 ip = mtod(m, struct ip *);
3516 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3517 ip->ip_len = htons(hdrlen + igmpreclen);
3518 ip->ip_off = htons(IP_DF);
3519 ip->ip_p = IPPROTO_IGMP;
3520 ip->ip_sum = 0;
3521
3522 ip->ip_src.s_addr = INADDR_ANY;
3523
3524 if (m->m_flags & M_IGMP_LOOP) {
3525 struct in_ifaddr *ia;
3526
3527 NET_EPOCH_ENTER();
3528 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
3529 if (ia != NULL)
3530 ip->ip_src = ia->ia_addr.sin_addr;
3531 NET_EPOCH_EXIT();
3532 }
3533
3534 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3535
3536 return (m);
3537 }
3538
3539 #ifdef KTR
3540 static char *
3541 igmp_rec_type_to_str(const int type)
3542 {
3543
3544 switch (type) {
3545 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3546 return "TO_EX";
3547 break;
3548 case IGMP_CHANGE_TO_INCLUDE_MODE:
3549 return "TO_IN";
3550 break;
3551 case IGMP_MODE_IS_EXCLUDE:
3552 return "MODE_EX";
3553 break;
3554 case IGMP_MODE_IS_INCLUDE:
3555 return "MODE_IN";
3556 break;
3557 case IGMP_ALLOW_NEW_SOURCES:
3558 return "ALLOW_NEW";
3559 break;
3560 case IGMP_BLOCK_OLD_SOURCES:
3561 return "BLOCK_OLD";
3562 break;
3563 default:
3564 break;
3565 }
3566 return "unknown";
3567 }
3568 #endif
3569
3570 #ifdef VIMAGE
3571 static void
3572 vnet_igmp_init(const void *unused __unused)
3573 {
3574
3575 netisr_register_vnet(&igmp_nh);
3576 }
3577 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3578 vnet_igmp_init, NULL);
3579
3580 static void
3581 vnet_igmp_uninit(const void *unused __unused)
3582 {
3583
3584 /* This can happen when we shutdown the entire network stack. */
3585 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3586
3587 netisr_unregister_vnet(&igmp_nh);
3588 }
3589 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3590 vnet_igmp_uninit, NULL);
3591 #endif
3592
3593 #ifdef DDB
3594 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3595 {
3596 struct igmp_ifsoftc *igi, *tigi;
3597 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3598
3599 if (!have_addr) {
3600 db_printf("usage: show igi_list <addr>\n");
3601 return;
3602 }
3603 igi_head = (struct _igi_list *)addr;
3604
3605 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3606 db_printf("igmp_ifsoftc %p:\n", igi);
3607 db_printf(" ifp %p\n", igi->igi_ifp);
3608 db_printf(" version %u\n", igi->igi_version);
3609 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3610 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3611 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3612 db_printf(" flags %#x\n", igi->igi_flags);
3613 db_printf(" rv %u\n", igi->igi_rv);
3614 db_printf(" qi %u\n", igi->igi_qi);
3615 db_printf(" qri %u\n", igi->igi_qri);
3616 db_printf(" uri %u\n", igi->igi_uri);
3617 /* struct mbufq igi_gq; */
3618 db_printf("\n");
3619 }
3620 }
3621 #endif
3622
3623 static int
3624 igmp_modevent(module_t mod, int type, void *unused __unused)
3625 {
3626
3627 switch (type) {
3628 case MOD_LOAD:
3629 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3630 IGMP_LOCK_INIT();
3631 m_raopt = igmp_ra_alloc();
3632 netisr_register(&igmp_nh);
3633 break;
3634 case MOD_UNLOAD:
3635 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3636 netisr_unregister(&igmp_nh);
3637 m_free(m_raopt);
3638 m_raopt = NULL;
3639 IGMP_LOCK_DESTROY();
3640 break;
3641 default:
3642 return (EOPNOTSUPP);
3643 }
3644 return (0);
3645 }
3646
3647 static moduledata_t igmp_mod = {
3648 "igmp",
3649 igmp_modevent,
3650 0
3651 };
3652 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);
Cache object: 891e6d061552f7f1c0cad24d5cf75a26
|