FreeBSD/Linux Kernel Cross Reference
sys/netinet/igmp.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007-2009 Bruce Simpson.
5 * Copyright (c) 1988 Stephen Deering.
6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * Stephen Deering of Stanford University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
37 */
38
39 /*
40 * Internet Group Management Protocol (IGMP) routines.
41 * [RFC1112, RFC2236, RFC3376]
42 *
43 * Written by Steve Deering, Stanford, May 1988.
44 * Modified by Rosen Sharma, Stanford, Aug 1994.
45 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
46 * Modified to fully comply with IGMPv2 by Bill Fenner, Oct 1995.
47 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
48 *
49 * MULTICAST Revision: 3.5.1.4
50 */
51
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54
55 #include "opt_ddb.h"
56
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/module.h>
60 #include <sys/malloc.h>
61 #include <sys/mbuf.h>
62 #include <sys/socket.h>
63 #include <sys/protosw.h>
64 #include <sys/kernel.h>
65 #include <sys/lock.h>
66 #include <sys/rmlock.h>
67 #include <sys/sysctl.h>
68 #include <sys/ktr.h>
69 #include <sys/condvar.h>
70
71 #ifdef DDB
72 #include <ddb/ddb.h>
73 #endif
74
75 #include <net/if.h>
76 #include <net/if_var.h>
77 #include <net/netisr.h>
78 #include <net/vnet.h>
79
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/in_systm.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet/ip_options.h>
86 #include <netinet/igmp.h>
87 #include <netinet/igmp_var.h>
88
89 #include <machine/in_cksum.h>
90
91 #include <security/mac/mac_framework.h>
92
93 #ifndef KTR_IGMPV3
94 #define KTR_IGMPV3 KTR_INET
95 #endif
96
97 static struct igmp_ifsoftc *
98 igi_alloc_locked(struct ifnet *);
99 static void igi_delete_locked(const struct ifnet *);
100 static void igmp_dispatch_queue(struct mbufq *, int, const int);
101 static void igmp_fasttimo_vnet(void);
102 static void igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
103 static int igmp_handle_state_change(struct in_multi *,
104 struct igmp_ifsoftc *);
105 static int igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
106 static int igmp_input_v1_query(struct ifnet *, const struct ip *,
107 const struct igmp *);
108 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
109 const struct igmp *);
110 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
111 /*const*/ struct igmpv3 *);
112 static int igmp_input_v3_group_query(struct in_multi *,
113 struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
114 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
115 /*const*/ struct igmp *);
116 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
117 /*const*/ struct igmp *);
118 static void igmp_intr(struct mbuf *);
119 static int igmp_isgroupreported(const struct in_addr);
120 static struct mbuf *
121 igmp_ra_alloc(void);
122 #ifdef KTR
123 static char * igmp_rec_type_to_str(const int);
124 #endif
125 static void igmp_set_version(struct igmp_ifsoftc *, const int);
126 static void igmp_slowtimo_vnet(void);
127 static int igmp_v1v2_queue_report(struct in_multi *, const int);
128 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
129 static void igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
130 static void igmp_v2_update_group(struct in_multi *, const int);
131 static void igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
132 static void igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
133 static struct mbuf *
134 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
135 static int igmp_v3_enqueue_group_record(struct mbufq *,
136 struct in_multi *, const int, const int, const int);
137 static int igmp_v3_enqueue_filter_change(struct mbufq *,
138 struct in_multi *);
139 static void igmp_v3_process_group_timers(struct in_multi_head *,
140 struct mbufq *, struct mbufq *, struct in_multi *,
141 const int);
142 static int igmp_v3_merge_state_changes(struct in_multi *,
143 struct mbufq *);
144 static void igmp_v3_suppress_group_record(struct in_multi *);
145 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
146 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
147 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
148
149 static const struct netisr_handler igmp_nh = {
150 .nh_name = "igmp",
151 .nh_handler = igmp_intr,
152 .nh_proto = NETISR_IGMP,
153 .nh_policy = NETISR_POLICY_SOURCE,
154 };
155
156 /*
157 * System-wide globals.
158 *
159 * Unlocked access to these is OK, except for the global IGMP output
160 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
161 * because all VIMAGEs have to share a global output queue, as netisrs
162 * themselves are not virtualized.
163 *
164 * Locking:
165 * * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
166 * Any may be taken independently; if any are held at the same
167 * time, the above lock order must be followed.
168 * * All output is delegated to the netisr.
169 * Now that Giant has been eliminated, the netisr may be inlined.
170 * * IN_MULTI_LIST_LOCK covers in_multi.
171 * * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
172 * including the output queue.
173 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
174 * per-link state iterators.
175 * * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
176 * therefore it is not refcounted.
177 * We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
178 *
179 * Reference counting
180 * * IGMP acquires its own reference every time an in_multi is passed to
181 * it and the group is being joined for the first time.
182 * * IGMP releases its reference(s) on in_multi in a deferred way,
183 * because the operations which process the release run as part of
184 * a loop whose control variables are directly affected by the release
185 * (that, and the need to avoid recursing on the IF_ADDR_LOCK).
186 *
187 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
188 * to a vnet in ifp->if_vnet.
189 *
190 * SMPng: XXX We may potentially race operations on ifma_protospec.
191 * The problem is that we currently lack a clean way of taking the
192 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
193 * as anything which modifies ifma needs to be covered by that lock.
194 * So check for ifma_protospec being NULL before proceeding.
195 */
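/*
 * Editorial sketch (not part of the original source): the acquisition
 * order described above, as practised by the query input paths and the
 * fast timeout handler in this file:
 *
 *	IN_MULTI_LIST_LOCK();
 *	IGMP_LOCK();
 *	...
 *	IF_ADDR_RLOCK(ifp);	(when needed, taken last)
 *	...
 *	IF_ADDR_RUNLOCK(ifp);
 *	...
 *	IGMP_UNLOCK();
 *	IN_MULTI_LIST_UNLOCK();
 *
 * See igmp_input_v1_query() for an instance using all three locks.
 */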
196 struct mtx igmp_mtx;
197
198 struct mbuf *m_raopt; /* Router Alert option */
199 static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
200
201 /*
202 * VIMAGE-wide globals.
203 *
204 * The IGMPv3 timers themselves need to run per-image; however,
205 * protosw timers run globally (see tcp).
206 * An ifnet can only be in one vimage at a time, and the loopback
207 * ifnet, loif, is itself virtualized.
208 * It would otherwise be possible to seriously hose IGMP state,
209 * and create inconsistencies in upstream multicast routing, if you have
210 * multiple VIMAGEs running on the same link joining different multicast
211 * groups, UNLESS the "primary IP address" is different. This is because
212 * IGMP for IPv4 does not force link-local addresses to be used for each
213 * node, unlike MLD for IPv6.
214 * Obviously the IGMPv3 per-interface state has per-vimage granularity
215 * also as a result.
216 *
217 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
218 * policy to control the address used by IGMP on the link.
219 */
220 VNET_DEFINE_STATIC(int, interface_timers_running); /* IGMPv3 general
221 * query response */
222 VNET_DEFINE_STATIC(int, state_change_timers_running); /* IGMPv3 state-change
223 * retransmit */
224 VNET_DEFINE_STATIC(int, current_state_timers_running); /* IGMPv1/v2 host
225 * report; IGMPv3 g/sg
226 * query response */
227
228 #define V_interface_timers_running VNET(interface_timers_running)
229 #define V_state_change_timers_running VNET(state_change_timers_running)
230 #define V_current_state_timers_running VNET(current_state_timers_running)
231
232 VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
233 LIST_HEAD_INITIALIZER(igi_head);
234 VNET_DEFINE_STATIC(struct igmpstat, igmpstat) = {
235 .igps_version = IGPS_VERSION_3,
236 .igps_len = sizeof(struct igmpstat),
237 };
238 VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};
239
240 #define V_igi_head VNET(igi_head)
241 #define V_igmpstat VNET(igmpstat)
242 #define V_igmp_gsrdelay VNET(igmp_gsrdelay)
243
244 VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
245 VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
246 VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
247 VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
248 VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
249 VNET_DEFINE_STATIC(int, igmp_legacysupp);
250 VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;
251
252 #define V_igmp_recvifkludge VNET(igmp_recvifkludge)
253 #define V_igmp_sendra VNET(igmp_sendra)
254 #define V_igmp_sendlocal VNET(igmp_sendlocal)
255 #define V_igmp_v1enable VNET(igmp_v1enable)
256 #define V_igmp_v2enable VNET(igmp_v2enable)
257 #define V_igmp_legacysupp VNET(igmp_legacysupp)
258 #define V_igmp_default_version VNET(igmp_default_version)
259
260 /*
261 * Virtualized sysctls.
262 */
263 SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_VNET | CTLFLAG_RW,
264 &VNET_NAME(igmpstat), igmpstat, "");
265 SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
266 &VNET_NAME(igmp_recvifkludge), 0,
267 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
268 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
269 &VNET_NAME(igmp_sendra), 0,
270 "Send IP Router Alert option in IGMPv2/v3 messages");
271 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
272 &VNET_NAME(igmp_sendlocal), 0,
273 "Send IGMP membership reports for 224.0.0.0/24 groups");
274 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
275 &VNET_NAME(igmp_v1enable), 0,
276 "Enable backwards compatibility with IGMPv1");
277 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
278 &VNET_NAME(igmp_v2enable), 0,
279 "Enable backwards compatibility with IGMPv2");
280 SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
281 &VNET_NAME(igmp_legacysupp), 0,
282 "Allow v1/v2 reports to suppress v3 group responses");
283 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
284 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
285 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
286 "Default version of IGMP to run on each interface");
287 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
288 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
289 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
290 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
291
292 /*
293 * Non-virtualized sysctls.
294 */
295 static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
296 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
297 "Per-interface IGMPv3 state");
298
299 static __inline void
300 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
301 {
302
303 #ifdef VIMAGE
304 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
305 #endif /* VIMAGE */
306 m->m_pkthdr.rcvif = ifp;
307 m->m_pkthdr.flowid = ifp->if_index;
308 }
309
310 static __inline void
311 igmp_scrub_context(struct mbuf *m)
312 {
313
314 m->m_pkthdr.PH_loc.ptr = NULL;
315 m->m_pkthdr.flowid = 0;
316 }
317
318 /*
319 * Restore context from a queued IGMP output chain.
320 * Return saved ifindex.
321 *
322 * VIMAGE: The assertion is there to make sure that we
323 * actually called CURVNET_SET() with what's in the mbuf chain.
324 */
325 static __inline uint32_t
326 igmp_restore_context(struct mbuf *m)
327 {
328
329 #ifdef notyet
330 #if defined(VIMAGE) && defined(INVARIANTS)
331 KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
332 ("%s: called when curvnet was not restored", __func__));
333 #endif
334 #endif
335 return (m->m_pkthdr.flowid);
336 }
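/*
 * Sketch (editorial) of the intended pairing of the three context
 * helpers across the netisr boundary; igmp_intr() is the in-tree
 * consumer of the restore side under VIMAGE:
 *
 *	igmp_save_context(m, ifp);		enqueue side, curvnet valid
 *	netisr_dispatch(NETISR_IGMP, m);
 *	...
 *	CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
 *	ifindex = igmp_restore_context(m);	dequeue side
 *	igmp_scrub_context(m);			before handing m to ip_output()
 *	...
 *	CURVNET_RESTORE();
 */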
337
338 /*
339 * Retrieve or set default IGMP version.
340 *
341 * VIMAGE: Assume curvnet set by caller.
342 * SMPng: NOTE: Serialized by IGMP lock.
343 */
344 static int
345 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
346 {
347 int error;
348 int new;
349
350 error = sysctl_wire_old_buffer(req, sizeof(int));
351 if (error)
352 return (error);
353
354 IGMP_LOCK();
355
356 new = V_igmp_default_version;
357
358 error = sysctl_handle_int(oidp, &new, 0, req);
359 if (error || !req->newptr)
360 goto out_locked;
361
362 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
363 error = EINVAL;
364 goto out_locked;
365 }
366
367 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
368 V_igmp_default_version, new);
369
370 V_igmp_default_version = new;
371
372 out_locked:
373 IGMP_UNLOCK();
374 return (error);
375 }
376
377 /*
378 * Retrieve or set threshold between group-source queries in seconds.
379 *
380 * VIMAGE: Assume curvnet set by caller.
381 * SMPng: NOTE: Serialized by IGMP lock.
382 */
383 static int
384 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
385 {
386 int error;
387 int i;
388
389 error = sysctl_wire_old_buffer(req, sizeof(int));
390 if (error)
391 return (error);
392
393 IGMP_LOCK();
394
395 i = V_igmp_gsrdelay.tv_sec;
396
397 error = sysctl_handle_int(oidp, &i, 0, req);
398 if (error || !req->newptr)
399 goto out_locked;
400
401 if (i < -1 || i >= 60) {
402 error = EINVAL;
403 goto out_locked;
404 }
405
406 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
407 V_igmp_gsrdelay.tv_sec, i);
408 V_igmp_gsrdelay.tv_sec = i;
409
410 out_locked:
411 IGMP_UNLOCK();
412 return (error);
413 }
414
415 /*
416 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
417 * For use by ifmcstat(8).
418 *
419 * SMPng: NOTE: Does an unlocked ifindex space read.
420 * VIMAGE: Assume curvnet set by caller. The node handler itself
421 * is not directly virtualized.
422 */
423 static int
424 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
425 {
426 int *name;
427 int error;
428 u_int namelen;
429 struct ifnet *ifp;
430 struct igmp_ifsoftc *igi;
431
432 name = (int *)arg1;
433 namelen = arg2;
434
435 if (req->newptr != NULL)
436 return (EPERM);
437
438 if (namelen != 1)
439 return (EINVAL);
440
441 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
442 if (error)
443 return (error);
444
445 IN_MULTI_LIST_LOCK();
446 IGMP_LOCK();
447
448 if (name[0] <= 0 || name[0] > V_if_index) {
449 error = ENOENT;
450 goto out_locked;
451 }
452
453 error = ENOENT;
454
455 ifp = ifnet_byindex(name[0]);
456 if (ifp == NULL)
457 goto out_locked;
458
459 LIST_FOREACH(igi, &V_igi_head, igi_link) {
460 if (ifp == igi->igi_ifp) {
461 struct igmp_ifinfo info;
462
463 info.igi_version = igi->igi_version;
464 info.igi_v1_timer = igi->igi_v1_timer;
465 info.igi_v2_timer = igi->igi_v2_timer;
466 info.igi_v3_timer = igi->igi_v3_timer;
467 info.igi_flags = igi->igi_flags;
468 info.igi_rv = igi->igi_rv;
469 info.igi_qi = igi->igi_qi;
470 info.igi_qri = igi->igi_qri;
471 info.igi_uri = igi->igi_uri;
472 error = SYSCTL_OUT(req, &info, sizeof(info));
473 break;
474 }
475 }
476
477 out_locked:
478 IGMP_UNLOCK();
479 IN_MULTI_LIST_UNLOCK();
480 return (error);
481 }
482
483 /*
484 * Dispatch an entire queue of pending packet chains
485 * using the netisr.
486 * VIMAGE: Assumes the vnet pointer has been set.
487 */
488 static void
489 igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
490 {
491 struct mbuf *m;
492
493 while ((m = mbufq_dequeue(mq)) != NULL) {
494 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
495 if (loop)
496 m->m_flags |= M_IGMP_LOOP;
497 netisr_dispatch(NETISR_IGMP, m);
498 if (--limit == 0)
499 break;
500 }
501 }
502
503 /*
504 * Filter outgoing IGMP report state by group.
505 *
506 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
507 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
508 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
509 * this may break certain IGMP snooping switches which rely on the old
510 * report behaviour.
511 *
512 * Return zero if the given group is one for which IGMP reports
513 * should be suppressed, or non-zero if reports should be issued.
514 */
515 static __inline int
516 igmp_isgroupreported(const struct in_addr addr)
517 {
518
519 if (in_allhosts(addr) ||
520 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
521 return (0);
522
523 return (1);
524 }
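/*
 * For example: 224.0.0.1 is never reported; 224.0.0.251 (mDNS) is
 * reported only while net.inet.igmp.sendlocal is non-zero; 239.1.1.1
 * is always reported.
 */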
525
526 /*
527 * Construct a Router Alert option to use in outgoing packets.
528 */
529 static struct mbuf *
530 igmp_ra_alloc(void)
531 {
532 struct mbuf *m;
533 struct ipoption *p;
534
535 m = m_get(M_WAITOK, MT_DATA);
536 p = mtod(m, struct ipoption *);
537 p->ipopt_dst.s_addr = INADDR_ANY;
538 p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */
539 p->ipopt_list[1] = 0x04; /* 4 bytes long */
540 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
541 p->ipopt_list[3] = 0x00; /* pad byte */
542 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
543
544 return (m);
545 }
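/*
 * Editorial note: the four option bytes built above form the RFC 2113
 * Router Alert option; with IPOPT_RA == 148 (0x94) and IPOPT_EOL == 0
 * the wire image is:
 *
 *	0x94 0x04 0x00 0x00
 *	type len  value (0 == "router shall examine packet")
 *
 * This option mbuf is later passed to ip_output() with outgoing
 * reports when net.inet.igmp.sendra is enabled.
 */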
546
547 /*
548 * Attach IGMP when PF_INET is attached to an interface.
549 */
550 struct igmp_ifsoftc *
551 igmp_domifattach(struct ifnet *ifp)
552 {
553 struct igmp_ifsoftc *igi;
554
555 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
556 __func__, ifp, ifp->if_xname);
557
558 IGMP_LOCK();
559
560 igi = igi_alloc_locked(ifp);
561 if (!(ifp->if_flags & IFF_MULTICAST))
562 igi->igi_flags |= IGIF_SILENT;
563
564 IGMP_UNLOCK();
565
566 return (igi);
567 }
568
569 /*
570 * VIMAGE: assume curvnet set by caller.
571 */
572 static struct igmp_ifsoftc *
573 igi_alloc_locked(/*const*/ struct ifnet *ifp)
574 {
575 struct igmp_ifsoftc *igi;
576
577 IGMP_LOCK_ASSERT();
578
579 igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
580 if (igi == NULL)
581 goto out;
582
583 igi->igi_ifp = ifp;
584 igi->igi_version = V_igmp_default_version;
585 igi->igi_flags = 0;
586 igi->igi_rv = IGMP_RV_INIT;
587 igi->igi_qi = IGMP_QI_INIT;
588 igi->igi_qri = IGMP_QRI_INIT;
589 igi->igi_uri = IGMP_URI_INIT;
590 mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
591
592 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
593
594 CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
595 ifp, ifp->if_xname);
596
597 out:
598 return (igi);
599 }
600
601 /*
602 * Hook for ifdetach.
603 *
604 * NOTE: Some finalization tasks need to run before the protocol domain
605 * is detached, but also before the link layer does its cleanup.
606 *
607 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
608 * XXX This is also bitten by unlocked ifma_protospec access.
609 */
610 void
611 igmp_ifdetach(struct ifnet *ifp)
612 {
613 struct igmp_ifsoftc *igi;
614 struct ifmultiaddr *ifma, *next;
615 struct in_multi *inm;
616 struct in_multi_head inm_free_tmp;
617 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
618 ifp->if_xname);
619
620 SLIST_INIT(&inm_free_tmp);
621 IGMP_LOCK();
622
623 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
624 if (igi->igi_version == IGMP_VERSION_3) {
625 IF_ADDR_WLOCK(ifp);
626 restart:
627 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
628 if (ifma->ifma_addr->sa_family != AF_INET ||
629 ifma->ifma_protospec == NULL)
630 continue;
631 inm = (struct in_multi *)ifma->ifma_protospec;
632 if (inm->inm_state == IGMP_LEAVING_MEMBER)
633 inm_rele_locked(&inm_free_tmp, inm);
634 inm_clear_recorded(inm);
635 if (__predict_false(ifma_restart)) {
636 ifma_restart = false;
637 goto restart;
638 }
639 }
640 IF_ADDR_WUNLOCK(ifp);
641 inm_release_list_deferred(&inm_free_tmp);
642 }
643 IGMP_UNLOCK();
645 }
646
647 /*
648 * Hook for domifdetach.
649 */
650 void
651 igmp_domifdetach(struct ifnet *ifp)
652 {
653
654 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
655 __func__, ifp, ifp->if_xname);
656
657 IGMP_LOCK();
658 igi_delete_locked(ifp);
659 IGMP_UNLOCK();
660 }
661
662 static void
663 igi_delete_locked(const struct ifnet *ifp)
664 {
665 struct igmp_ifsoftc *igi, *tigi;
666
667 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
668 __func__, ifp, ifp->if_xname);
669
670 IGMP_LOCK_ASSERT();
671
672 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
673 if (igi->igi_ifp == ifp) {
674 /*
675 * Free deferred General Query responses.
676 */
677 mbufq_drain(&igi->igi_gq);
678
679 LIST_REMOVE(igi, igi_link);
680 free(igi, M_IGMP);
681 return;
682 }
683 }
684 }
685
686 /*
687 * Process a received IGMPv1 query.
688 * Return non-zero if the message should be dropped.
689 *
690 * VIMAGE: The curvnet pointer is derived from the input ifp.
691 */
692 static int
693 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
694 const struct igmp *igmp)
695 {
696 struct ifmultiaddr *ifma;
697 struct igmp_ifsoftc *igi;
698 struct in_multi *inm;
699
700 /*
701 * IGMPv1 Host Membership Queries SHOULD always be addressed to
702 * 224.0.0.1. They are always treated as General Queries.
703 * igmp_group is always ignored. Do not drop it as a userland
704 * daemon may wish to see it.
705 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
706 */
707 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
708 IGMPSTAT_INC(igps_rcv_badqueries);
709 return (0);
710 }
711 IGMPSTAT_INC(igps_rcv_gen_queries);
712
713 IN_MULTI_LIST_LOCK();
714 IGMP_LOCK();
715
716 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
717 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
718
719 if (igi->igi_flags & IGIF_LOOPBACK) {
720 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
721 ifp, ifp->if_xname);
722 goto out_locked;
723 }
724
725 /*
726 * Switch to IGMPv1 host compatibility mode.
727 */
728 igmp_set_version(igi, IGMP_VERSION_1);
729
730 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
731
732 /*
733 * Start the timers in all of our group records
734 * for the interface on which the query arrived,
735 * except those which are already running.
736 */
737 IF_ADDR_RLOCK(ifp);
738 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
739 if (ifma->ifma_addr->sa_family != AF_INET ||
740 ifma->ifma_protospec == NULL)
741 continue;
742 inm = (struct in_multi *)ifma->ifma_protospec;
743 if (inm->inm_timer != 0)
744 continue;
745 switch (inm->inm_state) {
746 case IGMP_NOT_MEMBER:
747 case IGMP_SILENT_MEMBER:
748 break;
749 case IGMP_G_QUERY_PENDING_MEMBER:
750 case IGMP_SG_QUERY_PENDING_MEMBER:
751 case IGMP_REPORTING_MEMBER:
752 case IGMP_IDLE_MEMBER:
753 case IGMP_LAZY_MEMBER:
754 case IGMP_SLEEPING_MEMBER:
755 case IGMP_AWAKENING_MEMBER:
756 inm->inm_state = IGMP_REPORTING_MEMBER;
757 inm->inm_timer = IGMP_RANDOM_DELAY(
758 IGMP_V1V2_MAX_RI * PR_FASTHZ);
759 V_current_state_timers_running = 1;
760 break;
761 case IGMP_LEAVING_MEMBER:
762 break;
763 }
764 }
765 IF_ADDR_RUNLOCK(ifp);
766
767 out_locked:
768 IGMP_UNLOCK();
769 IN_MULTI_LIST_UNLOCK();
770
771 return (0);
772 }
773
774 /*
775 * Process a received IGMPv2 general or group-specific query.
776 */
777 static int
778 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
779 const struct igmp *igmp)
780 {
781 struct ifmultiaddr *ifma;
782 struct igmp_ifsoftc *igi;
783 struct in_multi *inm;
784 int is_general_query;
785 uint16_t timer;
786
787 is_general_query = 0;
788
789 /*
790 * Validate address fields upfront.
791 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
792 */
793 if (in_nullhost(igmp->igmp_group)) {
794 /*
795 * IGMPv2 General Query.
796 * If this was not sent to the all-hosts group, ignore it.
797 */
798 if (!in_allhosts(ip->ip_dst))
799 return (0);
800 IGMPSTAT_INC(igps_rcv_gen_queries);
801 is_general_query = 1;
802 } else {
803 /* IGMPv2 Group-Specific Query. */
804 IGMPSTAT_INC(igps_rcv_group_queries);
805 }
806
807 IN_MULTI_LIST_LOCK();
808 IGMP_LOCK();
809
810 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
811 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
812
813 if (igi->igi_flags & IGIF_LOOPBACK) {
814 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
815 ifp, ifp->if_xname);
816 goto out_locked;
817 }
818
819 /*
820 * Ignore v2 query if in v1 Compatibility Mode.
821 */
822 if (igi->igi_version == IGMP_VERSION_1)
823 goto out_locked;
824
825 igmp_set_version(igi, IGMP_VERSION_2);
826
827 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
828 if (timer == 0)
829 timer = 1;
830
831 if (is_general_query) {
832 /*
833 * For each reporting group joined on this
834 * interface, kick the report timer.
835 */
836 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
837 ifp, ifp->if_xname);
838 IF_ADDR_RLOCK(ifp);
839 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
840 if (ifma->ifma_addr->sa_family != AF_INET ||
841 ifma->ifma_protospec == NULL)
842 continue;
843 inm = (struct in_multi *)ifma->ifma_protospec;
844 igmp_v2_update_group(inm, timer);
845 }
846 IF_ADDR_RUNLOCK(ifp);
847 } else {
848 /*
849 * Group-specific IGMPv2 query, we need only
850 * look up the single group to process it.
851 */
852 inm = inm_lookup(ifp, igmp->igmp_group);
853 if (inm != NULL) {
854 CTR3(KTR_IGMPV3,
855 "process v2 query 0x%08x on ifp %p(%s)",
856 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
857 igmp_v2_update_group(inm, timer);
858 }
859 }
860
861 out_locked:
862 IGMP_UNLOCK();
863 IN_MULTI_LIST_UNLOCK();
864
865 return (0);
866 }
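/*
 * Worked example (illustrative, assuming the stock PR_FASTHZ of 5 and
 * IGMP_TIMER_SCALE of 10): an IGMPv2 query carrying igmp_code 100
 * advertises a Max Response Time of 10.0 seconds, so the conversion
 * above yields timer = 100 * 5 / 10 = 50 fast-timeout ticks, and
 * igmp_v2_update_group() then jitters each report over 1..50 ticks
 * via IGMP_RANDOM_DELAY().
 */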
867
868 /*
869 * Update the report timer on a group in response to an IGMPv2 query.
870 *
871 * If we are becoming the reporting member for this group, start the timer.
872 * If we already are the reporting member for this group, and timer is
873 * below the threshold, reset it.
874 *
875 * We may be updating the group for the first time since we switched
876 * to IGMPv3. If we are, then we must clear any recorded source lists,
877 * and transition to REPORTING state; the group timer is overloaded
878 * for group and group-source query responses.
879 *
880 * Unlike IGMPv3, the delay per group should be jittered
881 * to avoid bursts of IGMPv2 reports.
882 */
883 static void
884 igmp_v2_update_group(struct in_multi *inm, const int timer)
885 {
886
887 CTR4(KTR_IGMPV3, "%s: 0x%08x/%s timer=%d", __func__,
888 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
889
890 IN_MULTI_LIST_LOCK_ASSERT();
891
892 switch (inm->inm_state) {
893 case IGMP_NOT_MEMBER:
894 case IGMP_SILENT_MEMBER:
895 break;
896 case IGMP_REPORTING_MEMBER:
897 if (inm->inm_timer != 0 &&
898 inm->inm_timer <= timer) {
899 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
900 "skipping.", __func__);
901 break;
902 }
903 /* FALLTHROUGH */
904 case IGMP_SG_QUERY_PENDING_MEMBER:
905 case IGMP_G_QUERY_PENDING_MEMBER:
906 case IGMP_IDLE_MEMBER:
907 case IGMP_LAZY_MEMBER:
908 case IGMP_AWAKENING_MEMBER:
909 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
910 inm->inm_state = IGMP_REPORTING_MEMBER;
911 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
912 V_current_state_timers_running = 1;
913 break;
914 case IGMP_SLEEPING_MEMBER:
915 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
916 inm->inm_state = IGMP_AWAKENING_MEMBER;
917 break;
918 case IGMP_LEAVING_MEMBER:
919 break;
920 }
921 }
922
923 /*
924 * Process a received IGMPv3 general, group-specific or
925 * group-and-source-specific query.
926 * Assumes m has already been pulled up to the full IGMP message length.
927 * Return 0 if successful, otherwise an appropriate error code is returned.
928 */
929 static int
930 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
931 /*const*/ struct igmpv3 *igmpv3)
932 {
933 struct igmp_ifsoftc *igi;
934 struct in_multi *inm;
935 int is_general_query;
936 uint32_t maxresp, nsrc, qqi;
937 uint16_t timer;
938 uint8_t qrv;
939
940 is_general_query = 0;
941
942 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
943
944 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
945 if (maxresp >= 128) {
946 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
947 (IGMP_EXP(igmpv3->igmp_code) + 3);
948 }
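/*
 * For reference (RFC 3376, Section 4.1.1): codes >= 128 use an
 * exponential encoding, Max Resp Time = (mant | 0x10) << (exp + 3)
 * in tenths of a second; e.g. 0xC8 (exp 4, mant 8) decodes to 3072,
 * i.e. ~307 seconds. The same encoding applies to the QQI field
 * handled below.
 */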
949
950 /*
951 * Robustness must never be less than 2 for on-wire IGMPv3.
952 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
953 * an exception for interfaces whose IGMPv3 state changes
954 * are redirected to loopback (e.g. MANET).
955 */
956 qrv = IGMP_QRV(igmpv3->igmp_misc);
957 if (qrv < 2) {
958 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
959 qrv, IGMP_RV_INIT);
960 qrv = IGMP_RV_INIT;
961 }
962
963 qqi = igmpv3->igmp_qqi;
964 if (qqi >= 128) {
965 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
966 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
967 }
968
969 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
970 if (timer == 0)
971 timer = 1;
972
973 nsrc = ntohs(igmpv3->igmp_numsrc);
974
975 /*
976 * Validate address fields and versions upfront before
977 * accepting a v3 query.
978 * XXX SMPng: Unlocked access to igmpstat counters here.
979 */
980 if (in_nullhost(igmpv3->igmp_group)) {
981 /*
982 * IGMPv3 General Query.
983 *
984 * General Queries SHOULD be directed to 224.0.0.1.
985 * A general query with a source list has undefined
986 * behaviour; discard it.
987 */
988 IGMPSTAT_INC(igps_rcv_gen_queries);
989 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
990 IGMPSTAT_INC(igps_rcv_badqueries);
991 return (0);
992 }
993 is_general_query = 1;
994 } else {
995 /* Group or group-source specific query. */
996 if (nsrc == 0)
997 IGMPSTAT_INC(igps_rcv_group_queries);
998 else
999 IGMPSTAT_INC(igps_rcv_gsr_queries);
1000 }
1001
1002 IN_MULTI_LIST_LOCK();
1003 IGMP_LOCK();
1004
1005 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1006 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
1007
1008 if (igi->igi_flags & IGIF_LOOPBACK) {
1009 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1010 ifp, ifp->if_xname);
1011 goto out_locked;
1012 }
1013
1014 /*
1015 * Discard the v3 query if we're in Compatibility Mode.
1016 * The RFC does not clearly state that hosts must remain in
1017 * compatibility mode until the Older Version Querier Present
1018 * timer expires.
1019 */
1020 if (igi->igi_version != IGMP_VERSION_3) {
1021 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1022 igi->igi_version, ifp, ifp->if_xname);
1023 goto out_locked;
1024 }
1025
1026 igmp_set_version(igi, IGMP_VERSION_3);
1027 igi->igi_rv = qrv;
1028 igi->igi_qi = qqi;
1029 igi->igi_qri = maxresp;
1030
1031 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1032 maxresp);
1033
1034 if (is_general_query) {
1035 /*
1036 * Schedule a current-state report on this ifp for
1037 * all groups, possibly containing source lists.
1038 * If there is a pending General Query response
1039 * scheduled earlier than the selected delay, do
1040 * not schedule any other reports.
1041 * Otherwise, reset the interface timer.
1042 */
1043 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1044 ifp, ifp->if_xname);
1045 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1046 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1047 V_interface_timers_running = 1;
1048 }
1049 } else {
1050 /*
1051 * Group-source-specific queries are throttled on
1052 * a per-group basis to defeat denial-of-service attempts.
1053 * Queries for groups we are not a member of on this
1054 * link are simply ignored.
1055 */
1056 inm = inm_lookup(ifp, igmpv3->igmp_group);
1057 if (inm == NULL)
1058 goto out_locked;
1059 if (nsrc > 0) {
1060 if (!ratecheck(&inm->inm_lastgsrtv,
1061 &V_igmp_gsrdelay)) {
1062 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1063 __func__);
1064 IGMPSTAT_INC(igps_drop_gsr_queries);
1065 goto out_locked;
1066 }
1067 }
1068 CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
1069 ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
1070 /*
1071 * If there is a pending General Query response
1072 * scheduled sooner than the selected delay, no
1073 * further report need be scheduled.
1074 * Otherwise, prepare to respond to the
1075 * group-specific or group-and-source query.
1076 */
1077 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1078 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1079 }
1080
1081 out_locked:
1082 IGMP_UNLOCK();
1083 IN_MULTI_LIST_UNLOCK();
1084
1085 return (0);
1086 }
1087
1088 /*
1089 * Process a received IGMPv3 group-specific or group-and-source-specific
1090 * query.
1091 * Return <0 if any error occurred. Currently this is ignored.
1092 */
1093 static int
1094 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
1095 int timer, /*const*/ struct igmpv3 *igmpv3)
1096 {
1097 int retval;
1098 uint16_t nsrc;
1099
1100 IN_MULTI_LIST_LOCK_ASSERT();
1101 IGMP_LOCK_ASSERT();
1102
1103 retval = 0;
1104
1105 switch (inm->inm_state) {
1106 case IGMP_NOT_MEMBER:
1107 case IGMP_SILENT_MEMBER:
1108 case IGMP_SLEEPING_MEMBER:
1109 case IGMP_LAZY_MEMBER:
1110 case IGMP_AWAKENING_MEMBER:
1111 case IGMP_IDLE_MEMBER:
1112 case IGMP_LEAVING_MEMBER:
1113 return (retval);
1115 case IGMP_REPORTING_MEMBER:
1116 case IGMP_G_QUERY_PENDING_MEMBER:
1117 case IGMP_SG_QUERY_PENDING_MEMBER:
1118 break;
1119 }
1120
1121 nsrc = ntohs(igmpv3->igmp_numsrc);
1122
1123 /*
1124 * Deal with group-specific queries upfront.
1125 * If any group query is already pending, purge any recorded
1126 * source-list state if it exists, and schedule a query response
1127 * for this group-specific query.
1128 */
1129 if (nsrc == 0) {
1130 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1131 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1132 inm_clear_recorded(inm);
1133 timer = min(inm->inm_timer, timer);
1134 }
1135 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1136 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1137 V_current_state_timers_running = 1;
1138 return (retval);
1139 }
1140
1141 /*
1142 * Deal with the case where a group-and-source-specific query has
1143 * been received but a group-specific query is already pending.
1144 */
1145 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1146 timer = min(inm->inm_timer, timer);
1147 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1148 V_current_state_timers_running = 1;
1149 return (retval);
1150 }
1151
1152 /*
1153 * Finally, deal with the case where a group-and-source-specific
1154 * query has been received, where a response to a previous g-s-r
1155 * query exists, or none exists.
1156 * In this case, we need to parse the source-list which the Querier
1157 * has provided us with and check if we have any source list filter
1158 * entries at T1 for these sources. If we do not, there is no need
1159 * schedule a report and the query may be dropped.
1160 * If we do, we must record them and schedule a current-state
1161 * report for those sources.
1162 * FIXME: Handling source lists larger than 1 mbuf requires that
1163 * we pass the mbuf chain pointer down to this function, and use
1164 * m_getptr() to walk the chain.
1165 */
1166 if (inm->inm_nsrc > 0) {
1167 const struct in_addr *ap;
1168 int i, nrecorded;
1169
1170 ap = (const struct in_addr *)(igmpv3 + 1);
1171 nrecorded = 0;
1172 for (i = 0; i < nsrc; i++, ap++) {
1173 retval = inm_record_source(inm, ap->s_addr);
1174 if (retval < 0)
1175 break;
1176 nrecorded += retval;
1177 }
1178 if (nrecorded > 0) {
1179 CTR1(KTR_IGMPV3,
1180 "%s: schedule response to SG query", __func__);
1181 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1182 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1183 V_current_state_timers_running = 1;
1184 }
1185 }
1186
1187 return (retval);
1188 }
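/*
 * A sketch (editorial, untested) of the chain-aware source-list walk
 * that the FIXME above calls for. The function name and the srcoff
 * parameter (offset of the first source address from the start of the
 * mbuf chain) are hypothetical; lengths must have been validated by
 * the caller, as igmp_input() already does, since m_copydata() panics
 * on a short chain.
 */
#if 0
static int
igmp_record_sources_chain(struct in_multi *inm, struct mbuf *m0,
    int srcoff, uint16_t nsrc)
{
	struct in_addr src;
	int i, nrecorded, retval;

	nrecorded = 0;
	for (i = 0; i < nsrc; i++) {
		/* Copy out source i; it may straddle an mbuf boundary. */
		m_copydata(m0, srcoff + i * sizeof(struct in_addr),
		    sizeof(struct in_addr), (caddr_t)&src);
		retval = inm_record_source(inm, src.s_addr);
		if (retval < 0)
			return (retval);
		nrecorded += retval;
	}
	return (nrecorded);
}
#endif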
1189
1190 /*
1191 * Process a received IGMPv1 host membership report.
1192 *
1193 * NOTE: 0.0.0.0 workaround breaks const correctness.
1194 */
1195 static int
1196 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1197 /*const*/ struct igmp *igmp)
1198 {
1199 struct rm_priotracker in_ifa_tracker;
1200 struct in_ifaddr *ia;
1201 struct in_multi *inm;
1202
1203 IGMPSTAT_INC(igps_rcv_reports);
1204
1205 if (ifp->if_flags & IFF_LOOPBACK)
1206 return (0);
1207
1208 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1209 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1210 IGMPSTAT_INC(igps_rcv_badreports);
1211 return (EINVAL);
1212 }
1213
1214 /*
1215 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1216 * Booting clients may use the source address 0.0.0.0. Some
1217 * IGMP daemons may not know how to use IP_RECVIF to determine
1218 * the interface upon which this message was received.
1219 * Replace 0.0.0.0 with the subnet address if told to do so.
1220 */
1221 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1222 NET_EPOCH_ENTER();
1223 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1224 if (ia != NULL)
1225 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1226 NET_EPOCH_EXIT();
1227 }
1228
1229 CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
1230 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1231
1232 /*
1233 * IGMPv1 report suppression.
1234 * If we are a member of this group, and our membership should be
1235 * reported, stop our group timer and transition to the 'lazy' state.
1236 */
1237 IN_MULTI_LIST_LOCK();
1238 inm = inm_lookup(ifp, igmp->igmp_group);
1239 if (inm != NULL) {
1240 struct igmp_ifsoftc *igi;
1241
1242 igi = inm->inm_igi;
1243 if (igi == NULL) {
1244 KASSERT(igi != NULL,
1245 ("%s: no igi for ifp %p", __func__, ifp));
1246 goto out_locked;
1247 }
1248
1249 IGMPSTAT_INC(igps_rcv_ourreports);
1250
1251 /*
1252 * If we are in IGMPv3 host mode, do not allow the
1253 * other host's IGMPv1 report to suppress our reports
1254 * unless explicitly configured to do so.
1255 */
1256 if (igi->igi_version == IGMP_VERSION_3) {
1257 if (V_igmp_legacysupp)
1258 igmp_v3_suppress_group_record(inm);
1259 goto out_locked;
1260 }
1261
1262 inm->inm_timer = 0;
1263
1264 switch (inm->inm_state) {
1265 case IGMP_NOT_MEMBER:
1266 case IGMP_SILENT_MEMBER:
1267 break;
1268 case IGMP_IDLE_MEMBER:
1269 case IGMP_LAZY_MEMBER:
1270 case IGMP_AWAKENING_MEMBER:
1271 CTR3(KTR_IGMPV3,
1272 "report suppressed for 0x%08x on ifp %p(%s)",
1273 ntohl(igmp->igmp_group.s_addr), ifp,
1274 ifp->if_xname);
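/* FALLTHROUGH */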
1275 case IGMP_SLEEPING_MEMBER:
1276 inm->inm_state = IGMP_SLEEPING_MEMBER;
1277 break;
1278 case IGMP_REPORTING_MEMBER:
1279 CTR3(KTR_IGMPV3,
1280 "report suppressed for 0x%08x on ifp %p(%s)",
1281 ntohl(igmp->igmp_group.s_addr), ifp,
1282 ifp->if_xname);
1283 if (igi->igi_version == IGMP_VERSION_1)
1284 inm->inm_state = IGMP_LAZY_MEMBER;
1285 else if (igi->igi_version == IGMP_VERSION_2)
1286 inm->inm_state = IGMP_SLEEPING_MEMBER;
1287 break;
1288 case IGMP_G_QUERY_PENDING_MEMBER:
1289 case IGMP_SG_QUERY_PENDING_MEMBER:
1290 case IGMP_LEAVING_MEMBER:
1291 break;
1292 }
1293 }
1294
1295 out_locked:
1296 IN_MULTI_LIST_UNLOCK();
1297
1298 return (0);
1299 }
1300
1301 /*
1302 * Process a received IGMPv2 host membership report.
1303 *
1304 * NOTE: 0.0.0.0 workaround breaks const correctness.
1305 */
1306 static int
1307 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1308 /*const*/ struct igmp *igmp)
1309 {
1310 struct rm_priotracker in_ifa_tracker;
1311 struct in_ifaddr *ia;
1312 struct in_multi *inm;
1313
1314 /*
1315 * Make sure we don't hear our own membership report. Fast
1316 * leave requires knowing that we are the only member of a
1317 * group.
1318 */
1319 NET_EPOCH_ENTER();
1320 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1321 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1322 NET_EPOCH_EXIT();
1323 return (0);
1324 }
1325
1326 IGMPSTAT_INC(igps_rcv_reports);
1327
1328 if (ifp->if_flags & IFF_LOOPBACK) {
1329 NET_EPOCH_EXIT();
1330 return (0);
1331 }
1332
1333 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1334 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1335 NET_EPOCH_EXIT();
1336 IGMPSTAT_INC(igps_rcv_badreports);
1337 return (EINVAL);
1338 }
1339
1340 /*
1341 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1342 * Booting clients may use the source address 0.0.0.0. Some
1343 * IGMP daemons may not know how to use IP_RECVIF to determine
1344 * the interface upon which this message was received.
1345 * Replace 0.0.0.0 with the subnet address if told to do so.
1346 */
1347 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1348 if (ia != NULL)
1349 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1350 }
1351 NET_EPOCH_EXIT();
1352
1353 CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
1354 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1355
1356 /*
1357 * IGMPv2 report suppression.
1358 * If we are a member of this group, and our membership should be
1359 * reported, and our group timer is pending or about to be reset,
1360 * stop our group timer by transitioning to the 'lazy' state.
1361 */
1362 IN_MULTI_LIST_LOCK();
1363 inm = inm_lookup(ifp, igmp->igmp_group);
1364 if (inm != NULL) {
1365 struct igmp_ifsoftc *igi;
1366
1367 igi = inm->inm_igi;
1368 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1369
1370 IGMPSTAT_INC(igps_rcv_ourreports);
1371
1372 /*
1373 * If we are in IGMPv3 host mode, do not allow the
1374 * other host's IGMPv2 report to suppress our reports
1375 * unless explicitly configured to do so.
1376 */
1377 if (igi->igi_version == IGMP_VERSION_3) {
1378 if (V_igmp_legacysupp)
1379 igmp_v3_suppress_group_record(inm);
1380 goto out_locked;
1381 }
1382
1383 inm->inm_timer = 0;
1384
1385 switch (inm->inm_state) {
1386 case IGMP_NOT_MEMBER:
1387 case IGMP_SILENT_MEMBER:
1388 case IGMP_SLEEPING_MEMBER:
1389 break;
1390 case IGMP_REPORTING_MEMBER:
1391 case IGMP_IDLE_MEMBER:
1392 case IGMP_AWAKENING_MEMBER:
1393 CTR3(KTR_IGMPV3,
1394 "report suppressed for 0x%08x on ifp %p(%s)",
1395 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
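/* FALLTHROUGH */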
1396 case IGMP_LAZY_MEMBER:
1397 inm->inm_state = IGMP_LAZY_MEMBER;
1398 break;
1399 case IGMP_G_QUERY_PENDING_MEMBER:
1400 case IGMP_SG_QUERY_PENDING_MEMBER:
1401 case IGMP_LEAVING_MEMBER:
1402 break;
1403 }
1404 }
1405
1406 out_locked:
1407 IN_MULTI_LIST_UNLOCK();
1408
1409 return (0);
1410 }
1411
1412 int
1413 igmp_input(struct mbuf **mp, int *offp, int proto)
1414 {
1415 int iphlen;
1416 struct ifnet *ifp;
1417 struct igmp *igmp;
1418 struct ip *ip;
1419 struct mbuf *m;
1420 int igmplen;
1421 int minlen;
1422 int queryver;
1423
1424 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);
1425
1426 m = *mp;
1427 ifp = m->m_pkthdr.rcvif;
1428 *mp = NULL;
1429
1430 IGMPSTAT_INC(igps_rcv_total);
1431
1432 ip = mtod(m, struct ip *);
1433 iphlen = *offp;
1434 igmplen = ntohs(ip->ip_len) - iphlen;
1435
1436 /*
1437 * Validate lengths.
1438 */
1439 if (igmplen < IGMP_MINLEN) {
1440 IGMPSTAT_INC(igps_rcv_tooshort);
1441 m_freem(m);
1442 return (IPPROTO_DONE);
1443 }
1444
1445 /*
1446 * Always pullup to the minimum size for v1/v2 or v3
1447 * to amortize calls to m_pullup().
1448 */
1449 minlen = iphlen;
1450 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1451 minlen += IGMP_V3_QUERY_MINLEN;
1452 else
1453 minlen += IGMP_MINLEN;
1454 if ((!M_WRITABLE(m) || m->m_len < minlen) &&
1455 (m = m_pullup(m, minlen)) == NULL) {
1456 IGMPSTAT_INC(igps_rcv_tooshort);
1457 return (IPPROTO_DONE);
1458 }
1459 ip = mtod(m, struct ip *);
1460
1461 /*
1462 * Validate checksum.
1463 */
1464 m->m_data += iphlen;
1465 m->m_len -= iphlen;
1466 igmp = mtod(m, struct igmp *);
1467 if (in_cksum(m, igmplen)) {
1468 IGMPSTAT_INC(igps_rcv_badsum);
1469 m_freem(m);
1470 return (IPPROTO_DONE);
1471 }
1472 m->m_data -= iphlen;
1473 m->m_len += iphlen;
1474
1475 /*
1476 * IGMP control traffic is link-scope, and must have a TTL of 1.
1477 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1478 * probe packets may come from beyond the LAN.
1479 */
1480 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1481 IGMPSTAT_INC(igps_rcv_badttl);
1482 m_freem(m);
1483 return (IPPROTO_DONE);
1484 }
1485
1486 switch (igmp->igmp_type) {
1487 case IGMP_HOST_MEMBERSHIP_QUERY:
1488 if (igmplen == IGMP_MINLEN) {
1489 if (igmp->igmp_code == 0)
1490 queryver = IGMP_VERSION_1;
1491 else
1492 queryver = IGMP_VERSION_2;
1493 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1494 queryver = IGMP_VERSION_3;
1495 } else {
1496 IGMPSTAT_INC(igps_rcv_tooshort);
1497 m_freem(m);
1498 return (IPPROTO_DONE);
1499 }
1500
1501 switch (queryver) {
1502 case IGMP_VERSION_1:
1503 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1504 if (!V_igmp_v1enable)
1505 break;
1506 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1507 m_freem(m);
1508 return (IPPROTO_DONE);
1509 }
1510 break;
1511
1512 case IGMP_VERSION_2:
1513 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1514 if (!V_igmp_v2enable)
1515 break;
1516 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1517 m_freem(m);
1518 return (IPPROTO_DONE);
1519 }
1520 break;
1521
1522 case IGMP_VERSION_3: {
1523 struct igmpv3 *igmpv3;
1524 uint16_t igmpv3len;
1525 uint16_t nsrc;
1526
1527 IGMPSTAT_INC(igps_rcv_v3_queries);
1528 igmpv3 = (struct igmpv3 *)igmp;
1529 /*
1530 * Validate length based on source count.
1531 */
1532 nsrc = ntohs(igmpv3->igmp_numsrc);
1533 if (nsrc * sizeof(in_addr_t) >
1534 UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
1535 IGMPSTAT_INC(igps_rcv_tooshort);
1536 m_freem(m);
1537 return (IPPROTO_DONE);
1538 }
1539 /*
1540 * m_pullup() may modify m, so pullup in
1541 * this scope.
1542 */
1543 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1544 sizeof(struct in_addr) * nsrc;
1545 if ((!M_WRITABLE(m) ||
1546 m->m_len < igmpv3len) &&
1547 (m = m_pullup(m, igmpv3len)) == NULL) {
1548 IGMPSTAT_INC(igps_rcv_tooshort);
1549 return (IPPROTO_DONE);
1550 }
1551 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1552 + iphlen);
1553 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1554 m_freem(m);
1555 return (IPPROTO_DONE);
1556 }
1557 }
1558 break;
1559 }
1560 break;
1561
1562 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1563 if (!V_igmp_v1enable)
1564 break;
1565 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1566 m_freem(m);
1567 return (IPPROTO_DONE);
1568 }
1569 break;
1570
1571 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1572 if (!V_igmp_v2enable)
1573 break;
1574 if (!ip_checkrouteralert(m))
1575 IGMPSTAT_INC(igps_rcv_nora);
1576 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1577 m_freem(m);
1578 return (IPPROTO_DONE);
1579 }
1580 break;
1581
1582 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1583 /*
1584 * Hosts do not need to process IGMPv3 membership reports,
1585 * as report suppression is no longer required.
1586 */
1587 if (!ip_checkrouteralert(m))
1588 IGMPSTAT_INC(igps_rcv_nora);
1589 break;
1590
1591 default:
1592 break;
1593 }
1594
1595 /*
1596 * Pass all valid IGMP packets up to any process(es) listening on a
1597 * raw IGMP socket.
1598 */
1599 *mp = m;
1600 return (rip_input(mp, offp, proto));
1601 }
1602
1603
1604 /*
1605 * Fast timeout handler (global).
1606 * VIMAGE: Timeout handlers are expected to service all vimages.
1607 */
1608 void
1609 igmp_fasttimo(void)
1610 {
1611 VNET_ITERATOR_DECL(vnet_iter);
1612
1613 VNET_LIST_RLOCK_NOSLEEP();
1614 VNET_FOREACH(vnet_iter) {
1615 CURVNET_SET(vnet_iter);
1616 igmp_fasttimo_vnet();
1617 CURVNET_RESTORE();
1618 }
1619 VNET_LIST_RUNLOCK_NOSLEEP();
1620 }
1621
1622 /*
1623 * Fast timeout handler (per-vnet).
1624 * Sends are shuffled off to a netisr to deal with Giant.
1625 *
1626 * VIMAGE: Assume caller has set up our curvnet.
1627 */
1628 static void
1629 igmp_fasttimo_vnet(void)
1630 {
1631 struct mbufq scq; /* State-change packets */
1632 struct mbufq qrq; /* Query response packets */
1633 struct ifnet *ifp;
1634 struct igmp_ifsoftc *igi;
1635 struct ifmultiaddr *ifma, *next;
1636 struct in_multi *inm;
1637 struct in_multi_head inm_free_tmp;
1638 int loop, uri_fasthz;
1639
1640 loop = 0;
1641 uri_fasthz = 0;
1642
1643 /*
1644 * Quick check to see if any work needs to be done, in order to
1645 * minimize the overhead of fasttimo processing.
1646 * SMPng: XXX Unlocked reads.
1647 */
1648 if (!V_current_state_timers_running &&
1649 !V_interface_timers_running &&
1650 !V_state_change_timers_running)
1651 return;
1652
1653 SLIST_INIT(&inm_free_tmp);
1654 IN_MULTI_LIST_LOCK();
1655 IGMP_LOCK();
1656
1657 /*
1658 * IGMPv3 General Query response timer processing.
1659 */
1660 if (V_interface_timers_running) {
1661 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1662
1663 V_interface_timers_running = 0;
1664 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1665 if (igi->igi_v3_timer == 0) {
1666 /* Do nothing. */
1667 } else if (--igi->igi_v3_timer == 0) {
1668 igmp_v3_dispatch_general_query(igi);
1669 } else {
1670 V_interface_timers_running = 1;
1671 }
1672 }
1673 }
1674
1675 if (!V_current_state_timers_running &&
1676 !V_state_change_timers_running)
1677 goto out_locked;
1678
1679 V_current_state_timers_running = 0;
1680 V_state_change_timers_running = 0;
1681
1682 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1683
1684 /*
1685 * IGMPv1/v2/v3 host report and state-change timer processing.
1686 * Note: Processing a v3 group timer may remove a node.
1687 */
1688 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1689 ifp = igi->igi_ifp;
1690
1691 if (igi->igi_version == IGMP_VERSION_3) {
1692 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1693 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1694 PR_FASTHZ);
1695 mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
1696 mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1697 }
1698
1699 IF_ADDR_WLOCK(ifp);
1700 restart:
1701 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
1702 if (ifma->ifma_addr->sa_family != AF_INET ||
1703 ifma->ifma_protospec == NULL)
1704 continue;
1705 inm = (struct in_multi *)ifma->ifma_protospec;
1706 switch (igi->igi_version) {
1707 case IGMP_VERSION_1:
1708 case IGMP_VERSION_2:
1709 igmp_v1v2_process_group_timer(inm,
1710 igi->igi_version);
1711 break;
1712 case IGMP_VERSION_3:
1713 igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
1714 &scq, inm, uri_fasthz);
1715 break;
1716 }
1717 if (__predict_false(ifma_restart)) {
1718 ifma_restart = false;
1719 goto restart;
1720 }
1721 }
1722 IF_ADDR_WUNLOCK(ifp);
1723
1724 if (igi->igi_version == IGMP_VERSION_3) {
1725 igmp_dispatch_queue(&qrq, 0, loop);
1726 igmp_dispatch_queue(&scq, 0, loop);
1727
1728 /*
1729 * Free the in_multi reference(s) for this
1730 * IGMP lifecycle.
1731 */
1732 inm_release_list_deferred(&inm_free_tmp);
1733 }
1734 }
1735
1736 out_locked:
1737 IGMP_UNLOCK();
1738 IN_MULTI_LIST_UNLOCK();
1739 }
1740
1741 /*
1742 * Update host report group timer for IGMPv1/v2.
1743 * Will update the global pending timer flags.
1744 */
1745 static void
1746 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1747 {
1748 int report_timer_expired;
1749
1750 IN_MULTI_LIST_LOCK_ASSERT();
1751 IGMP_LOCK_ASSERT();
1752
1753 if (inm->inm_timer == 0) {
1754 report_timer_expired = 0;
1755 } else if (--inm->inm_timer == 0) {
1756 report_timer_expired = 1;
1757 } else {
1758 V_current_state_timers_running = 1;
1759 return;
1760 }
1761
1762 switch (inm->inm_state) {
1763 case IGMP_NOT_MEMBER:
1764 case IGMP_SILENT_MEMBER:
1765 case IGMP_IDLE_MEMBER:
1766 case IGMP_LAZY_MEMBER:
1767 case IGMP_SLEEPING_MEMBER:
1768 case IGMP_AWAKENING_MEMBER:
1769 break;
1770 case IGMP_REPORTING_MEMBER:
1771 if (report_timer_expired) {
1772 inm->inm_state = IGMP_IDLE_MEMBER;
1773 (void)igmp_v1v2_queue_report(inm,
1774 (version == IGMP_VERSION_2) ?
1775 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1776 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1777 }
1778 break;
1779 case IGMP_G_QUERY_PENDING_MEMBER:
1780 case IGMP_SG_QUERY_PENDING_MEMBER:
1781 case IGMP_LEAVING_MEMBER:
1782 break;
1783 }
1784 }
1785
1786 /*
1787 * Update a group's timers for IGMPv3.
1788 * Will update the global pending timer flags.
1789 * Note: Unlocked read from igi.
1790 */
1791 static void
1792 igmp_v3_process_group_timers(struct in_multi_head *inmh,
1793 struct mbufq *qrq, struct mbufq *scq,
1794 struct in_multi *inm, const int uri_fasthz)
1795 {
1796 int query_response_timer_expired;
1797 int state_change_retransmit_timer_expired;
1798
1799 IN_MULTI_LIST_LOCK_ASSERT();
1800 IGMP_LOCK_ASSERT();
1801
1802 query_response_timer_expired = 0;
1803 state_change_retransmit_timer_expired = 0;
1804
1805 /*
1806 * During a transition from v1/v2 compatibility mode back to v3,
1807 * a group record in REPORTING state may still have its group
1808 * timer active. This is a no-op in this function; it is easier
1809 * to deal with it here than to complicate the slow-timeout path.
1810 */
1811 if (inm->inm_timer == 0) {
1812 query_response_timer_expired = 0;
1813 } else if (--inm->inm_timer == 0) {
1814 query_response_timer_expired = 1;
1815 } else {
1816 V_current_state_timers_running = 1;
1817 }
1818
1819 if (inm->inm_sctimer == 0) {
1820 state_change_retransmit_timer_expired = 0;
1821 } else if (--inm->inm_sctimer == 0) {
1822 state_change_retransmit_timer_expired = 1;
1823 } else {
1824 V_state_change_timers_running = 1;
1825 }
1826
1827 /* We are in fasttimo, so be quick about it. */
1828 if (!state_change_retransmit_timer_expired &&
1829 !query_response_timer_expired)
1830 return;
1831
1832 switch (inm->inm_state) {
1833 case IGMP_NOT_MEMBER:
1834 case IGMP_SILENT_MEMBER:
1835 case IGMP_SLEEPING_MEMBER:
1836 case IGMP_LAZY_MEMBER:
1837 case IGMP_AWAKENING_MEMBER:
1838 case IGMP_IDLE_MEMBER:
1839 break;
1840 case IGMP_G_QUERY_PENDING_MEMBER:
1841 case IGMP_SG_QUERY_PENDING_MEMBER:
1842 /*
1843 * Respond to a previously pending Group-Specific
1844 * or Group-and-Source-Specific query by enqueueing
1845 * the appropriate Current-State report for
1846 * immediate transmission.
1847 */
1848 if (query_response_timer_expired) {
1849 int retval __unused;
1850
1851 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1852 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1853 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1854 __func__, retval);
1855 inm->inm_state = IGMP_REPORTING_MEMBER;
1856 /* XXX Clear recorded sources for next time. */
1857 inm_clear_recorded(inm);
1858 }
1859 /* FALLTHROUGH */
1860 case IGMP_REPORTING_MEMBER:
1861 case IGMP_LEAVING_MEMBER:
1862 if (state_change_retransmit_timer_expired) {
1863 /*
1864 * State-change retransmission timer fired.
1865 * If there are any further pending retransmissions,
1866 * set the global pending state-change flag, and
1867 * reset the timer.
1868 */
1869 if (--inm->inm_scrv > 0) {
1870 inm->inm_sctimer = uri_fasthz;
1871 V_state_change_timers_running = 1;
1872 }
1873 /*
1874 * Retransmit the previously computed state-change
1875 * report. If there are no further pending
1876 * retransmissions, the mbuf queue will be consumed.
1877 * Update T0 state to T1 as we have now sent
1878 * a state-change.
1879 */
1880 (void)igmp_v3_merge_state_changes(inm, scq);
1881
1882 inm_commit(inm);
1883 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
1884 ntohl(inm->inm_addr.s_addr),
1885 inm->inm_ifp->if_xname);
1886
1887 /*
1888 * If we are leaving the group for good, make sure
1889 * we release IGMP's reference to it.
1890 * This release must be deferred using a SLIST,
1891 * as we are called from a loop which traverses
1892 * the in_ifmultiaddr TAILQ.
1893 */
1894 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1895 inm->inm_scrv == 0) {
1896 inm->inm_state = IGMP_NOT_MEMBER;
1897 inm_rele_locked(inmh, inm);
1898 }
1899 }
1900 break;
1901 }
1902 }
1903
1904
1905 /*
1906 * Suppress a group's pending response to a group or source/group query.
1907 *
1908 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1909 * Do NOT update ST1/ST0 as this operation merely suppresses
1910 * the currently pending group record.
1911 * Do NOT suppress the response to a general query. It is possible but
1912 * it would require adding another state or flag.
1913 */
1914 static void
1915 igmp_v3_suppress_group_record(struct in_multi *inm)
1916 {
1917
1918 IN_MULTI_LIST_LOCK_ASSERT();
1919
1920 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1921 ("%s: not IGMPv3 mode on link", __func__));
1922
1923 	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1924 	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1925 return;
1926
1927 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1928 inm_clear_recorded(inm);
1929
1930 inm->inm_timer = 0;
1931 inm->inm_state = IGMP_REPORTING_MEMBER;
1932 }
1933
1934 /*
1935 * Switch to a different IGMP version on the given interface,
1936 * as per Section 7.2.1.
1937 */
1938 static void
1939 igmp_set_version(struct igmp_ifsoftc *igi, const int version)
1940 {
1941 int old_version_timer;
1942
1943 IGMP_LOCK_ASSERT();
1944
1945 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1946 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1947
1948 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1949 /*
1950 * Compute the "Older Version Querier Present" timer as per
1951 * Section 8.12.
1952 */
1953 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1954 old_version_timer *= PR_SLOWHZ;
1955
1956 if (version == IGMP_VERSION_1) {
1957 igi->igi_v1_timer = old_version_timer;
1958 igi->igi_v2_timer = 0;
1959 } else if (version == IGMP_VERSION_2) {
1960 igi->igi_v1_timer = 0;
1961 igi->igi_v2_timer = old_version_timer;
1962 }
1963 }
1964
1965 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1966 if (igi->igi_version != IGMP_VERSION_2) {
1967 igi->igi_version = IGMP_VERSION_2;
1968 igmp_v3_cancel_link_timers(igi);
1969 }
1970 } else if (igi->igi_v1_timer > 0) {
1971 if (igi->igi_version != IGMP_VERSION_1) {
1972 igi->igi_version = IGMP_VERSION_1;
1973 igmp_v3_cancel_link_timers(igi);
1974 }
1975 }
1976 }
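
/*
 * Illustrative sketch (not compiled): the "Older Version Querier
 * Present" computation above, pulled out into a hypothetical helper.
 * With the RFC 3376 defaults -- Robustness Variable 2, Query Interval
 * 125s, Query Response Interval 10s -- this yields (2 * 125 + 10) =
 * 260 seconds, i.e. 520 slow-timeout ticks at PR_SLOWHZ == 2.
 */
#if 0
static int
igmp_ovqp_ticks(const int rv, const int qi, const int qri)
{

	/* Section 8.12: RV * QI + QRI, scaled to slow-timeout ticks. */
	return ((rv * qi + qri) * PR_SLOWHZ);
}
#endif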
1977
1978 /*
1979 * Cancel pending IGMPv3 timers for the given link and all groups
1980 * joined on it; state-change, general-query, and group-query timers.
1981 *
1982  * Only ever called on a transition from v3 to Compatibility mode. Kill
1983  * the timers stone dead (this may be expensive for a large number of
1984  * groups); they will be restarted if Compatibility Mode deems that they
1985  * must be, due to query processing.
1986 */
1987 static void
1988 igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
1989 {
1990 struct ifmultiaddr *ifma, *ifmatmp;
1991 struct ifnet *ifp;
1992 struct in_multi *inm;
1993 struct in_multi_head inm_free_tmp;
1994
1995 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
1996 igi->igi_ifp, igi->igi_ifp->if_xname);
1997
1998 IN_MULTI_LIST_LOCK_ASSERT();
1999 IGMP_LOCK_ASSERT();
2000 SLIST_INIT(&inm_free_tmp);
2001
2002 /*
2003 * Stop the v3 General Query Response on this link stone dead.
2004 * If fasttimo is woken up due to V_interface_timers_running,
2005 * the flag will be cleared if there are no pending link timers.
2006 */
2007 igi->igi_v3_timer = 0;
2008
2009 /*
2010 * Now clear the current-state and state-change report timers
2011 * for all memberships scoped to this link.
2012 */
2013 ifp = igi->igi_ifp;
2014 IF_ADDR_WLOCK(ifp);
2015 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, ifmatmp) {
2016 if (ifma->ifma_addr->sa_family != AF_INET ||
2017 ifma->ifma_protospec == NULL)
2018 continue;
2019 inm = (struct in_multi *)ifma->ifma_protospec;
2020 switch (inm->inm_state) {
2021 case IGMP_NOT_MEMBER:
2022 case IGMP_SILENT_MEMBER:
2023 case IGMP_IDLE_MEMBER:
2024 case IGMP_LAZY_MEMBER:
2025 case IGMP_SLEEPING_MEMBER:
2026 case IGMP_AWAKENING_MEMBER:
2027 /*
2028 * These states are either not relevant in v3 mode,
2029 * or are unreported. Do nothing.
2030 */
2031 break;
2032 case IGMP_LEAVING_MEMBER:
2033 /*
2034 * If we are leaving the group and switching to
2035 * compatibility mode, we need to release the final
2036 * reference held for issuing the INCLUDE {}, and
2037 * transition to REPORTING to ensure the host leave
2038 * message is sent upstream to the old querier --
2039 * transition to NOT would lose the leave and race.
2040 */
2041 inm_rele_locked(&inm_free_tmp, inm);
2042 /* FALLTHROUGH */
2043 case IGMP_G_QUERY_PENDING_MEMBER:
2044 case IGMP_SG_QUERY_PENDING_MEMBER:
2045 inm_clear_recorded(inm);
2046 /* FALLTHROUGH */
2047 case IGMP_REPORTING_MEMBER:
2048 inm->inm_state = IGMP_REPORTING_MEMBER;
2049 break;
2050 }
2051 /*
2052 * Always clear state-change and group report timers.
2053 * Free any pending IGMPv3 state-change records.
2054 */
2055 inm->inm_sctimer = 0;
2056 inm->inm_timer = 0;
2057 mbufq_drain(&inm->inm_scq);
2058 }
2059 IF_ADDR_WUNLOCK(ifp);
2060
2061 inm_release_list_deferred(&inm_free_tmp);
2062 }
2063
2064 /*
2065 * Update the Older Version Querier Present timers for a link.
2066 * See Section 7.2.1 of RFC 3376.
2067 */
2068 static void
2069 igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
2070 {
2071
2072 IGMP_LOCK_ASSERT();
2073
2074 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2075 /*
2076 * IGMPv1 and IGMPv2 Querier Present timers expired.
2077 *
2078 * Revert to IGMPv3.
2079 */
2080 if (igi->igi_version != IGMP_VERSION_3) {
2081 CTR5(KTR_IGMPV3,
2082 "%s: transition from v%d -> v%d on %p(%s)",
2083 __func__, igi->igi_version, IGMP_VERSION_3,
2084 igi->igi_ifp, igi->igi_ifp->if_xname);
2085 igi->igi_version = IGMP_VERSION_3;
2086 }
2087 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2088 /*
2089 * IGMPv1 Querier Present timer expired,
2090 * IGMPv2 Querier Present timer running.
2091 		 * If IGMPv2 was disabled since the last timeout,
2092 * revert to IGMPv3.
2093 * If IGMPv2 is enabled, revert to IGMPv2.
2094 */
2095 if (!V_igmp_v2enable) {
2096 CTR5(KTR_IGMPV3,
2097 "%s: transition from v%d -> v%d on %p(%s)",
2098 __func__, igi->igi_version, IGMP_VERSION_3,
2099 igi->igi_ifp, igi->igi_ifp->if_xname);
2100 igi->igi_v2_timer = 0;
2101 igi->igi_version = IGMP_VERSION_3;
2102 } else {
2103 --igi->igi_v2_timer;
2104 if (igi->igi_version != IGMP_VERSION_2) {
2105 CTR5(KTR_IGMPV3,
2106 "%s: transition from v%d -> v%d on %p(%s)",
2107 __func__, igi->igi_version, IGMP_VERSION_2,
2108 igi->igi_ifp, igi->igi_ifp->if_xname);
2109 igi->igi_version = IGMP_VERSION_2;
2110 igmp_v3_cancel_link_timers(igi);
2111 }
2112 }
2113 } else if (igi->igi_v1_timer > 0) {
2114 /*
2115 * IGMPv1 Querier Present timer running.
2116 * Stop IGMPv2 timer if running.
2117 *
2118 		 * If IGMPv1 was disabled since the last timeout,
2119 		 * revert to IGMPv3.
2120 		 * If IGMPv1 is enabled, reset the IGMPv2 timer if it is running.
2121 */
2122 if (!V_igmp_v1enable) {
2123 CTR5(KTR_IGMPV3,
2124 "%s: transition from v%d -> v%d on %p(%s)",
2125 __func__, igi->igi_version, IGMP_VERSION_3,
2126 igi->igi_ifp, igi->igi_ifp->if_xname);
2127 igi->igi_v1_timer = 0;
2128 igi->igi_version = IGMP_VERSION_3;
2129 } else {
2130 --igi->igi_v1_timer;
2131 }
2132 if (igi->igi_v2_timer > 0) {
2133 CTR3(KTR_IGMPV3,
2134 "%s: cancel v2 timer on %p(%s)",
2135 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2136 igi->igi_v2_timer = 0;
2137 }
2138 }
2139 }
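
/*
 * Minimal sketch (not compiled) of the reversion policy above, as a
 * hypothetical pure function whose inputs are snapshots of
 * igi_v1_timer/igi_v2_timer and the v1enable/v2enable knobs; it
 * ignores the timer decrements and the side effect of cancelling the
 * v3 link timers.
 */
#if 0
static int
igmp_querier_version(int v1_timer, int v2_timer, int v1enable,
    int v2enable)
{

	if (v1_timer > 0)
		return (v1enable ? IGMP_VERSION_1 : IGMP_VERSION_3);
	if (v2_timer > 0)
		return (v2enable ? IGMP_VERSION_2 : IGMP_VERSION_3);
	return (IGMP_VERSION_3);
}
#endif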
2140
2141 /*
2142 * Global slowtimo handler.
2143 * VIMAGE: Timeout handlers are expected to service all vimages.
2144 */
2145 void
2146 igmp_slowtimo(void)
2147 {
2148 VNET_ITERATOR_DECL(vnet_iter);
2149
2150 VNET_LIST_RLOCK_NOSLEEP();
2151 VNET_FOREACH(vnet_iter) {
2152 CURVNET_SET(vnet_iter);
2153 igmp_slowtimo_vnet();
2154 CURVNET_RESTORE();
2155 }
2156 VNET_LIST_RUNLOCK_NOSLEEP();
2157 }
2158
2159 /*
2160 * Per-vnet slowtimo handler.
2161 */
2162 static void
2163 igmp_slowtimo_vnet(void)
2164 {
2165 struct igmp_ifsoftc *igi;
2166
2167 IGMP_LOCK();
2168
2169 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2170 igmp_v1v2_process_querier_timers(igi);
2171 }
2172
2173 IGMP_UNLOCK();
2174 }
2175
2176 /*
2177 * Dispatch an IGMPv1/v2 host report or leave message.
2178 * These are always small enough to fit inside a single mbuf.
2179 */
2180 static int
2181 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2182 {
2183 struct ifnet *ifp;
2184 struct igmp *igmp;
2185 struct ip *ip;
2186 struct mbuf *m;
2187
2188 IN_MULTI_LIST_LOCK_ASSERT();
2189 IGMP_LOCK_ASSERT();
2190
2191 ifp = inm->inm_ifp;
2192
2193 m = m_gethdr(M_NOWAIT, MT_DATA);
2194 if (m == NULL)
2195 return (ENOMEM);
2196 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2197
2198 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2199
2200 m->m_data += sizeof(struct ip);
2201 m->m_len = sizeof(struct igmp);
2202
2203 igmp = mtod(m, struct igmp *);
2204 igmp->igmp_type = type;
2205 igmp->igmp_code = 0;
2206 igmp->igmp_group = inm->inm_addr;
2207 igmp->igmp_cksum = 0;
2208 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2209
2210 m->m_data -= sizeof(struct ip);
2211 m->m_len += sizeof(struct ip);
2212
2213 ip = mtod(m, struct ip *);
2214 ip->ip_tos = 0;
2215 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2216 ip->ip_off = 0;
2217 ip->ip_p = IPPROTO_IGMP;
2218 ip->ip_src.s_addr = INADDR_ANY;
2219
2220 if (type == IGMP_HOST_LEAVE_MESSAGE)
2221 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2222 else
2223 ip->ip_dst = inm->inm_addr;
2224
2225 igmp_save_context(m, ifp);
2226
2227 m->m_flags |= M_IGMPV2;
2228 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2229 m->m_flags |= M_IGMP_LOOP;
2230
2231 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2232 netisr_dispatch(NETISR_IGMP, m);
2233
2234 return (0);
2235 }
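
/*
 * Illustrative sketch (not compiled): the message built above is the
 * fixed 8-byte IGMPv1/v2 header (type, code, checksum, group).  A
 * hypothetical flat-buffer equivalent of the in_cksum() call, per the
 * RFC 1071 ones'-complement sum:
 */
#if 0
static uint16_t
igmp_cksum_flat(const void *buf, int len)
{
	const uint16_t *w = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *w++;
		len -= 2;
	}
	if (len == 1)				/* pad odd trailing byte */
		sum += *(const uint8_t *)w;
	sum = (sum >> 16) + (sum & 0xffff);	/* fold carry */
	sum += (sum >> 16);
	return (~sum & 0xffff);
}
#endif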
2236
2237 /*
2238 * Process a state change from the upper layer for the given IPv4 group.
2239 *
2240 * Each socket holds a reference on the in_multi in its own ip_moptions.
2241  * The socket layer will have made the necessary updates to the group
2242 * state, it is now up to IGMP to issue a state change report if there
2243 * has been any change between T0 (when the last state-change was issued)
2244 * and T1 (now).
2245 *
2246 * We use the IGMPv3 state machine at group level. The IGMP module
2247 * however makes the decision as to which IGMP protocol version to speak.
2248 * A state change *from* INCLUDE {} always means an initial join.
2249 * A state change *to* INCLUDE {} always means a final leave.
2250 *
2251 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2252 * save ourselves a bunch of work; any exclusive mode groups need not
2253 * compute source filter lists.
2254 *
2255 * VIMAGE: curvnet should have been set by caller, as this routine
2256 * is called from the socket option handlers.
2257 */
2258 int
2259 igmp_change_state(struct in_multi *inm)
2260 {
2261 struct igmp_ifsoftc *igi;
2262 struct ifnet *ifp;
2263 int error;
2264
2265 error = 0;
2266 IN_MULTI_LOCK_ASSERT();
2267 /*
2268 * Try to detect if the upper layer just asked us to change state
2269 * for an interface which has now gone away.
2270 */
2271 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2272 ifp = inm->inm_ifma->ifma_ifp;
2273 /*
2274 * Sanity check that netinet's notion of ifp is the
2275 * same as net's.
2276 */
2277 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2278
2279 IGMP_LOCK();
2280
2281 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2282 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2283
2284 /*
2285 * If we detect a state transition to or from MCAST_UNDEFINED
2286 * for this group, then we are starting or finishing an IGMP
2287 * life cycle for this group.
2288 */
2289 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2290 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2291 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2292 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2293 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2294 error = igmp_initial_join(inm, igi);
2295 goto out_locked;
2296 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2297 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2298 igmp_final_leave(inm, igi);
2299 goto out_locked;
2300 }
2301 } else {
2302 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2303 }
2304
2305 error = igmp_handle_state_change(inm, igi);
2306
2307 out_locked:
2308 IGMP_UNLOCK();
2309 return (error);
2310 }
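
/*
 * Illustrative sketch (userland code, not part of this file): the
 * sort of socket operation whose ip_moptions update ultimately
 * reaches igmp_change_state() as an initial join, i.e. a T0 filter
 * mode of MCAST_UNDEFINED transitioning to EXCLUDE {}.  The helper
 * name is hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
join_group(int s, const char *group)
{
	struct ip_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr(group);
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	/* Kicks the kernel's IGMP state machine for this group. */
	return (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
	    &mreq, sizeof(mreq)));
}
#endif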
2311
2312 /*
2313 * Perform the initial join for an IGMP group.
2314 *
2315 * When joining a group:
2316 * If the group should have its IGMP traffic suppressed, do nothing.
2317 * IGMPv1 starts sending IGMPv1 host membership reports.
2318 * IGMPv2 starts sending IGMPv2 host membership reports.
2319 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2320 * initial state of the membership.
2321 */
2322 static int
2323 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2324 {
2325 struct ifnet *ifp;
2326 struct mbufq *mq;
2327 int error, retval, syncstates;
2328
2329 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2330 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2331
2332 error = 0;
2333 syncstates = 1;
2334
2335 ifp = inm->inm_ifp;
2336
2337 IN_MULTI_LOCK_ASSERT();
2338 IGMP_LOCK_ASSERT();
2339
2340 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2341
2342 /*
2343 * Groups joined on loopback or marked as 'not reported',
2344 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2345 * are never reported in any IGMP protocol exchanges.
2346 * All other groups enter the appropriate IGMP state machine
2347 * for the version in use on this link.
2348 * A link marked as IGIF_SILENT causes IGMP to be completely
2349 * disabled for the link.
2350 */
2351 if ((ifp->if_flags & IFF_LOOPBACK) ||
2352 (igi->igi_flags & IGIF_SILENT) ||
2353 !igmp_isgroupreported(inm->inm_addr)) {
2354 CTR1(KTR_IGMPV3,
2355 "%s: not kicking state machine for silent group", __func__);
2356 inm->inm_state = IGMP_SILENT_MEMBER;
2357 inm->inm_timer = 0;
2358 } else {
2359 /*
2360 * Deal with overlapping in_multi lifecycle.
2361 * If this group was LEAVING, then make sure
2362 * we drop the reference we picked up to keep the
2363 * group around for the final INCLUDE {} enqueue.
2364 */
2365 if (igi->igi_version == IGMP_VERSION_3 &&
2366 inm->inm_state == IGMP_LEAVING_MEMBER) {
2367 MPASS(inm->inm_refcount > 1);
2368 inm_rele_locked(NULL, inm);
2369 }
2370 inm->inm_state = IGMP_REPORTING_MEMBER;
2371
2372 switch (igi->igi_version) {
2373 case IGMP_VERSION_1:
2374 case IGMP_VERSION_2:
2375 inm->inm_state = IGMP_IDLE_MEMBER;
2376 error = igmp_v1v2_queue_report(inm,
2377 (igi->igi_version == IGMP_VERSION_2) ?
2378 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2379 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2380 if (error == 0) {
2381 inm->inm_timer = IGMP_RANDOM_DELAY(
2382 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2383 V_current_state_timers_running = 1;
2384 }
2385 break;
2386
2387 case IGMP_VERSION_3:
2388 /*
2389 * Defer update of T0 to T1, until the first copy
2390 * of the state change has been transmitted.
2391 */
2392 syncstates = 0;
2393
2394 /*
2395 * Immediately enqueue a State-Change Report for
2396 * this interface, freeing any previous reports.
2397 * Don't kick the timers if there is nothing to do,
2398 * or if an error occurred.
2399 */
2400 mq = &inm->inm_scq;
2401 mbufq_drain(mq);
2402 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2403 0, 0);
2404 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2405 __func__, retval);
2406 if (retval <= 0) {
2407 				error = -retval;
2408 break;
2409 }
2410
2411 /*
2412 * Schedule transmission of pending state-change
2413 * report up to RV times for this link. The timer
2414 * will fire at the next igmp_fasttimo (~200ms),
2415 * giving us an opportunity to merge the reports.
2416 */
2417 if (igi->igi_flags & IGIF_LOOPBACK) {
2418 inm->inm_scrv = 1;
2419 } else {
2420 KASSERT(igi->igi_rv > 1,
2421 ("%s: invalid robustness %d", __func__,
2422 igi->igi_rv));
2423 inm->inm_scrv = igi->igi_rv;
2424 }
2425 inm->inm_sctimer = 1;
2426 V_state_change_timers_running = 1;
2427
2428 error = 0;
2429 break;
2430 }
2431 }
2432
2433 /*
2434 * Only update the T0 state if state change is atomic,
2435 * i.e. we don't need to wait for a timer to fire before we
2436 * can consider the state change to have been communicated.
2437 */
2438 if (syncstates) {
2439 inm_commit(inm);
2440 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2441 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2442 }
2443
2444 return (error);
2445 }
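
/*
 * Worked example of the v1/v2 report delay chosen above: assuming
 * IGMP_RANDOM_DELAY(x) expands to (random() % (x) + 1), that
 * IGMP_V1V2_MAX_RI is 10 and PR_FASTHZ is 5, the bound is
 * 10 * 5 = 50, so inm_timer is drawn uniformly from [1, 50]
 * fast-timeout ticks of ~200ms each; the unsolicited report fires
 * at most ~10 seconds after the join.
 */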
2446
2447 /*
2448 * Issue an intermediate state change during the IGMP life-cycle.
2449 */
2450 static int
2451 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2452 {
2453 struct ifnet *ifp;
2454 int retval;
2455
2456 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2457 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2458
2459 ifp = inm->inm_ifp;
2460
2461 IN_MULTI_LIST_LOCK_ASSERT();
2462 IGMP_LOCK_ASSERT();
2463
2464 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2465
2466 if ((ifp->if_flags & IFF_LOOPBACK) ||
2467 (igi->igi_flags & IGIF_SILENT) ||
2468 !igmp_isgroupreported(inm->inm_addr) ||
2469 (igi->igi_version != IGMP_VERSION_3)) {
2470 if (!igmp_isgroupreported(inm->inm_addr)) {
2471 CTR1(KTR_IGMPV3,
2472 "%s: not kicking state machine for silent group", __func__);
2473 }
2474 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2475 inm_commit(inm);
2476 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2477 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2478 return (0);
2479 }
2480
2481 mbufq_drain(&inm->inm_scq);
2482
2483 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2484 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2485 if (retval <= 0)
2486 return (-retval);
2487
2488 /*
2489 * If record(s) were enqueued, start the state-change
2490 * report timer for this group.
2491 */
2492 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2493 inm->inm_sctimer = 1;
2494 V_state_change_timers_running = 1;
2495
2496 return (0);
2497 }
2498
2499 /*
2500 * Perform the final leave for an IGMP group.
2501 *
2502 * When leaving a group:
2503 * IGMPv1 does nothing.
2504 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2505 * IGMPv3 enqueues a state-change report containing a transition
2506 * to INCLUDE {} for immediate transmission.
2507 */
2508 static void
2509 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2510 {
2511 int syncstates;
2512
2513 syncstates = 1;
2514
2515 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2516 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2517 inm->inm_ifp->if_xname);
2518
2519 IN_MULTI_LIST_LOCK_ASSERT();
2520 IGMP_LOCK_ASSERT();
2521
2522 switch (inm->inm_state) {
2523 case IGMP_NOT_MEMBER:
2524 case IGMP_SILENT_MEMBER:
2525 case IGMP_LEAVING_MEMBER:
2526 /* Already leaving or left; do nothing. */
2527 CTR1(KTR_IGMPV3,
2528 "%s: not kicking state machine for silent group", __func__);
2529 break;
2530 case IGMP_REPORTING_MEMBER:
2531 case IGMP_IDLE_MEMBER:
2532 case IGMP_G_QUERY_PENDING_MEMBER:
2533 case IGMP_SG_QUERY_PENDING_MEMBER:
2534 if (igi->igi_version == IGMP_VERSION_2) {
2535 #ifdef INVARIANTS
2536 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2537 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2538 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2539 __func__);
2540 #endif
2541 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2542 inm->inm_state = IGMP_NOT_MEMBER;
2543 } else if (igi->igi_version == IGMP_VERSION_3) {
2544 /*
2545 * Stop group timer and all pending reports.
2546 * Immediately enqueue a state-change report
2547 * TO_IN {} to be sent on the next fast timeout,
2548 * giving us an opportunity to merge reports.
2549 */
2550 mbufq_drain(&inm->inm_scq);
2551 inm->inm_timer = 0;
2552 if (igi->igi_flags & IGIF_LOOPBACK) {
2553 inm->inm_scrv = 1;
2554 } else {
2555 inm->inm_scrv = igi->igi_rv;
2556 }
2557 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2558 "pending retransmissions.", __func__,
2559 ntohl(inm->inm_addr.s_addr),
2560 inm->inm_ifp->if_xname, inm->inm_scrv);
2561 if (inm->inm_scrv == 0) {
2562 inm->inm_state = IGMP_NOT_MEMBER;
2563 inm->inm_sctimer = 0;
2564 } else {
2565 int retval __unused;
2566
2567 inm_acquire_locked(inm);
2568
2569 retval = igmp_v3_enqueue_group_record(
2570 &inm->inm_scq, inm, 1, 0, 0);
2571 KASSERT(retval != 0,
2572 ("%s: enqueue record = %d", __func__,
2573 retval));
2574
2575 inm->inm_state = IGMP_LEAVING_MEMBER;
2576 inm->inm_sctimer = 1;
2577 V_state_change_timers_running = 1;
2578 syncstates = 0;
2579 }
2580 break;
2581 }
2582 break;
2583 case IGMP_LAZY_MEMBER:
2584 case IGMP_SLEEPING_MEMBER:
2585 case IGMP_AWAKENING_MEMBER:
2586 /* Our reports are suppressed; do nothing. */
2587 break;
2588 }
2589
2590 if (syncstates) {
2591 inm_commit(inm);
2592 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2593 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2594 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2595 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2596 __func__, ntohl(inm->inm_addr.s_addr),
2597 inm->inm_ifp->if_xname);
2598 }
2599 }
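
/*
 * Worked example: the TO_IN {} enqueued above is a single
 * IGMP_CHANGE_TO_INCLUDE_MODE group record with ig_numsrc == 0 --
 * just the 8-byte igmp_grouprec header -- which is retransmitted up
 * to Robustness Variable (inm_scrv) times, one state-change timer
 * firing apart, before IGMP's reference to the group is dropped.
 */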
2600
2601 /*
2602 * Enqueue an IGMPv3 group record to the given output queue.
2603 *
2604 * XXX This function could do with having the allocation code
2605 * split out, and the multiple-tree-walks coalesced into a single
2606 * routine as has been done in igmp_v3_enqueue_filter_change().
2607 *
2608 * If is_state_change is zero, a current-state record is appended.
2609 * If is_state_change is non-zero, a state-change report is appended.
2610 *
2611 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2612  * If is_group_query is zero, and there is a packet at the tail of
2613  * the queue with enough free space, the group record will be
2614  * appended to that packet.
2615 * Otherwise a new mbuf packet chain is allocated.
2616 *
2617 * If is_source_query is non-zero, each source is checked to see if
2618 * it was recorded for a Group-Source query, and will be omitted if
2619 * it is not both in-mode and recorded.
2620 *
2621 * The function will attempt to allocate leading space in the packet
2622 * for the IP/IGMP header to be prepended without fragmenting the chain.
2623 *
2624 * If successful the size of all data appended to the queue is returned,
2625 * otherwise an error code less than zero is returned, or zero if
2626 * no record(s) were appended.
2627 */
2628 static int
2629 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2630 const int is_state_change, const int is_group_query,
2631 const int is_source_query)
2632 {
2633 struct igmp_grouprec ig;
2634 struct igmp_grouprec *pig;
2635 struct ifnet *ifp;
2636 struct ip_msource *ims, *nims;
2637 struct mbuf *m0, *m, *md;
2638 int is_filter_list_change;
2639 int minrec0len, m0srcs, msrcs, nbytes, off;
2640 int record_has_sources;
2641 int now;
2642 int type;
2643 in_addr_t naddr;
2644 uint8_t mode;
2645
2646 IN_MULTI_LIST_LOCK_ASSERT();
2647
2648 ifp = inm->inm_ifp;
2649 is_filter_list_change = 0;
2650 m = NULL;
2651 m0 = NULL;
2652 m0srcs = 0;
2653 msrcs = 0;
2654 nbytes = 0;
2655 nims = NULL;
2656 record_has_sources = 1;
2657 pig = NULL;
2658 type = IGMP_DO_NOTHING;
2659 mode = inm->inm_st[1].iss_fmode;
2660
2661 /*
2662 * If we did not transition out of ASM mode during t0->t1,
2663 * and there are no source nodes to process, we can skip
2664 * the generation of source records.
2665 */
2666 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2667 inm->inm_nsrc == 0)
2668 record_has_sources = 0;
2669
2670 if (is_state_change) {
2671 /*
2672 * Queue a state change record.
2673 * If the mode did not change, and there are non-ASM
2674 * listeners or source filters present,
2675 * we potentially need to issue two records for the group.
2676 * If we are transitioning to MCAST_UNDEFINED, we need
2677 * not send any sources.
2678 * If there are ASM listeners, and there was no filter
2679 * mode transition of any kind, do nothing.
2680 */
2681 if (mode != inm->inm_st[0].iss_fmode) {
2682 if (mode == MCAST_EXCLUDE) {
2683 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2684 __func__);
2685 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2686 } else {
2687 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2688 __func__);
2689 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2690 if (mode == MCAST_UNDEFINED)
2691 record_has_sources = 0;
2692 }
2693 } else {
2694 if (record_has_sources) {
2695 is_filter_list_change = 1;
2696 } else {
2697 type = IGMP_DO_NOTHING;
2698 }
2699 }
2700 } else {
2701 /*
2702 * Queue a current state record.
2703 */
2704 if (mode == MCAST_EXCLUDE) {
2705 type = IGMP_MODE_IS_EXCLUDE;
2706 } else if (mode == MCAST_INCLUDE) {
2707 type = IGMP_MODE_IS_INCLUDE;
2708 KASSERT(inm->inm_st[1].iss_asm == 0,
2709 ("%s: inm %p is INCLUDE but ASM count is %d",
2710 __func__, inm, inm->inm_st[1].iss_asm));
2711 }
2712 }
2713
2714 /*
2715 * Generate the filter list changes using a separate function.
2716 */
2717 if (is_filter_list_change)
2718 return (igmp_v3_enqueue_filter_change(mq, inm));
2719
2720 if (type == IGMP_DO_NOTHING) {
2721 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2722 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2723 return (0);
2724 }
2725
2726 /*
2727 * If any sources are present, we must be able to fit at least
2728 * one in the trailing space of the tail packet's mbuf,
2729 * ideally more.
2730 */
2731 minrec0len = sizeof(struct igmp_grouprec);
2732 if (record_has_sources)
2733 minrec0len += sizeof(in_addr_t);
2734
2735 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2736 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2737 inm->inm_ifp->if_xname);
2738
2739 /*
2740 * Check if we have a packet in the tail of the queue for this
2741 * group into which the first group record for this group will fit.
2742 * Otherwise allocate a new packet.
2743 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2744 * Note: Group records for G/GSR query responses MUST be sent
2745 * in their own packet.
2746 */
2747 m0 = mbufq_last(mq);
2748 if (!is_group_query &&
2749 m0 != NULL &&
2750 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2751 (m0->m_pkthdr.len + minrec0len) <
2752 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2753 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2754 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2755 m = m0;
2756 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2757 } else {
2758 if (mbufq_full(mq)) {
2759 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2760 return (-ENOMEM);
2761 }
2762 m = NULL;
2763 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2764 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2765 if (!is_state_change && !is_group_query) {
2766 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2767 if (m)
2768 m->m_data += IGMP_LEADINGSPACE;
2769 }
2770 if (m == NULL) {
2771 m = m_gethdr(M_NOWAIT, MT_DATA);
2772 if (m)
2773 M_ALIGN(m, IGMP_LEADINGSPACE);
2774 }
2775 if (m == NULL)
2776 return (-ENOMEM);
2777
2778 igmp_save_context(m, ifp);
2779
2780 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2781 }
2782
2783 /*
2784 * Append group record.
2785 * If we have sources, we don't know how many yet.
2786 */
2787 ig.ig_type = type;
2788 ig.ig_datalen = 0;
2789 ig.ig_numsrc = 0;
2790 ig.ig_group = inm->inm_addr;
2791 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2792 if (m != m0)
2793 m_freem(m);
2794 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2795 return (-ENOMEM);
2796 }
2797 nbytes += sizeof(struct igmp_grouprec);
2798
2799 /*
2800 * Append as many sources as will fit in the first packet.
2801 * If we are appending to a new packet, the chain allocation
2802 * may potentially use clusters; use m_getptr() in this case.
2803 * If we are appending to an existing packet, we need to obtain
2804 * a pointer to the group record after m_append(), in case a new
2805 * mbuf was allocated.
2806 * Only append sources which are in-mode at t1. If we are
2807 * transitioning to MCAST_UNDEFINED state on the group, do not
2808 * include source entries.
2809 * Only report recorded sources in our filter set when responding
2810 * to a group-source query.
2811 */
2812 if (record_has_sources) {
2813 if (m == m0) {
2814 md = m_last(m);
2815 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2816 md->m_len - nbytes);
2817 } else {
2818 md = m_getptr(m, 0, &off);
2819 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2820 off);
2821 }
2822 msrcs = 0;
2823 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2824 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2825 ims->ims_haddr);
2826 now = ims_get_mode(inm, ims, 1);
2827 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2828 if ((now != mode) ||
2829 (now == mode && mode == MCAST_UNDEFINED)) {
2830 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2831 continue;
2832 }
2833 if (is_source_query && ims->ims_stp == 0) {
2834 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2835 __func__);
2836 continue;
2837 }
2838 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2839 naddr = htonl(ims->ims_haddr);
2840 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2841 if (m != m0)
2842 m_freem(m);
2843 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2844 __func__);
2845 return (-ENOMEM);
2846 }
2847 nbytes += sizeof(in_addr_t);
2848 ++msrcs;
2849 if (msrcs == m0srcs)
2850 break;
2851 }
2852 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2853 msrcs);
2854 pig->ig_numsrc = htons(msrcs);
2855 nbytes += (msrcs * sizeof(in_addr_t));
2856 }
2857
2858 if (is_source_query && msrcs == 0) {
2859 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2860 if (m != m0)
2861 m_freem(m);
2862 return (0);
2863 }
2864
2865 /*
2866 * We are good to go with first packet.
2867 */
2868 if (m != m0) {
2869 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2870 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2871 mbufq_enqueue(mq, m);
2872 } else
2873 m->m_pkthdr.PH_vt.vt_nrecs++;
2874
2875 /*
2876 * No further work needed if no source list in packet(s).
2877 */
2878 if (!record_has_sources)
2879 return (nbytes);
2880
2881 /*
2882 * Whilst sources remain to be announced, we need to allocate
2883 * a new packet and fill out as many sources as will fit.
2884 * Always try for a cluster first.
2885 */
2886 while (nims != NULL) {
2887 if (mbufq_full(mq)) {
2888 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2889 return (-ENOMEM);
2890 }
2891 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2892 if (m)
2893 m->m_data += IGMP_LEADINGSPACE;
2894 if (m == NULL) {
2895 m = m_gethdr(M_NOWAIT, MT_DATA);
2896 if (m)
2897 M_ALIGN(m, IGMP_LEADINGSPACE);
2898 }
2899 if (m == NULL)
2900 return (-ENOMEM);
2901 igmp_save_context(m, ifp);
2902 md = m_getptr(m, 0, &off);
2903 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2904 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2905
2906 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2907 if (m != m0)
2908 m_freem(m);
2909 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2910 return (-ENOMEM);
2911 }
2912 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2913 nbytes += sizeof(struct igmp_grouprec);
2914
2915 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2916 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2917
2918 msrcs = 0;
2919 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2920 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2921 ims->ims_haddr);
2922 now = ims_get_mode(inm, ims, 1);
2923 if ((now != mode) ||
2924 (now == mode && mode == MCAST_UNDEFINED)) {
2925 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2926 continue;
2927 }
2928 if (is_source_query && ims->ims_stp == 0) {
2929 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2930 __func__);
2931 continue;
2932 }
2933 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2934 naddr = htonl(ims->ims_haddr);
2935 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2936 if (m != m0)
2937 m_freem(m);
2938 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2939 __func__);
2940 return (-ENOMEM);
2941 }
2942 ++msrcs;
2943 if (msrcs == m0srcs)
2944 break;
2945 }
2946 pig->ig_numsrc = htons(msrcs);
2947 nbytes += (msrcs * sizeof(in_addr_t));
2948
2949 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2950 mbufq_enqueue(mq, m);
2951 }
2952
2953 return (nbytes);
2954 }
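
/*
 * Illustrative sketch (not compiled): on-wire size of one group
 * record as queued by igmp_v3_enqueue_group_record() above -- the
 * fixed igmp_grouprec header plus one in_addr_t per source (the
 * auxiliary data length is always zero here).  A TO_IN record with
 * two sources, for example, occupies 8 + 2 * 4 = 16 bytes.  The
 * helper name is hypothetical.
 */
#if 0
static int
igmp_grouprec_len(const int numsrc)
{

	return (sizeof(struct igmp_grouprec) + numsrc * sizeof(in_addr_t));
}
#endif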
2955
2956 /*
2957 * Type used to mark record pass completion.
2958 * We exploit the fact we can cast to this easily from the
2959 * current filter modes on each ip_msource node.
2960 */
2961 typedef enum {
2962 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2963 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2964 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2965 REC_FULL = REC_ALLOW | REC_BLOCK
2966 } rectype_t;
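
/*
 * Worked example of the inversion used below: a source whose mode
 * became UNDEFINED at t1 is reported as the inverse of the group
 * filter mode, nrt = (rectype_t)(~mode & REC_FULL).  With
 * MCAST_INCLUDE == 1 and MCAST_EXCLUDE == 2, a source dropped from
 * an INCLUDE group gives ~1 & 0x03 == REC_BLOCK (BLOCK_OLD), and a
 * source dropped from an EXCLUDE group gives ~2 & 0x03 == REC_ALLOW
 * (ALLOW_NEW).
 */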
2967
2968 /*
2969 * Enqueue an IGMPv3 filter list change to the given output queue.
2970 *
2971 * Source list filter state is held in an RB-tree. When the filter list
2972 * for a group is changed without changing its mode, we need to compute
2973 * the deltas between T0 and T1 for each source in the filter set,
2974 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2975 *
2976 * As we may potentially queue two record types, and the entire R-B tree
2977 * needs to be walked at once, we break this out into its own function
2978 * so we can generate a tightly packed queue of packets.
2979 *
2980 * XXX This could be written to only use one tree walk, although that makes
2981 * serializing into the mbuf chains a bit harder. For now we do two walks
2982 * which makes things easier on us, and it may or may not be harder on
2983 * the L2 cache.
2984 *
2985 * If successful the size of all data appended to the queue is returned,
2986 * otherwise an error code less than zero is returned, or zero if
2987 * no record(s) were appended.
2988 */
2989 static int
2990 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
2991 {
2992 static const int MINRECLEN =
2993 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
2994 struct ifnet *ifp;
2995 struct igmp_grouprec ig;
2996 struct igmp_grouprec *pig;
2997 struct ip_msource *ims, *nims;
2998 struct mbuf *m, *m0, *md;
2999 in_addr_t naddr;
3000 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3001 int nallow, nblock;
3002 uint8_t mode, now, then;
3003 rectype_t crt, drt, nrt;
3004
3005 IN_MULTI_LIST_LOCK_ASSERT();
3006
3007 if (inm->inm_nsrc == 0 ||
3008 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3009 return (0);
3010
3011 ifp = inm->inm_ifp; /* interface */
3012 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3013 crt = REC_NONE; /* current group record type */
3014 drt = REC_NONE; /* mask of completed group record types */
3015 nrt = REC_NONE; /* record type for current node */
3016 m0srcs = 0; /* # source which will fit in current mbuf chain */
3017 nbytes = 0; /* # of bytes appended to group's state-change queue */
3018 npbytes = 0; /* # of bytes appended this packet */
3019 rsrcs = 0; /* # sources encoded in current record */
3020 schanged = 0; /* # nodes encoded in overall filter change */
3021 nallow = 0; /* # of source entries in ALLOW_NEW */
3022 nblock = 0; /* # of source entries in BLOCK_OLD */
3023 nims = NULL; /* next tree node pointer */
3024
3025 /*
3026 * For each possible filter record mode.
3027 * The first kind of source we encounter tells us which
3028 * is the first kind of record we start appending.
3029 * If a node transitioned to UNDEFINED at t1, its mode is treated
3030 * as the inverse of the group's filter mode.
3031 */
3032 while (drt != REC_FULL) {
3033 do {
3034 m0 = mbufq_last(mq);
3035 if (m0 != NULL &&
3036 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3037 IGMP_V3_REPORT_MAXRECS) &&
3038 (m0->m_pkthdr.len + MINRECLEN) <
3039 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3040 m = m0;
3041 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3042 sizeof(struct igmp_grouprec)) /
3043 sizeof(in_addr_t);
3044 CTR1(KTR_IGMPV3,
3045 "%s: use previous packet", __func__);
3046 } else {
3047 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3048 if (m)
3049 m->m_data += IGMP_LEADINGSPACE;
3050 if (m == NULL) {
3051 m = m_gethdr(M_NOWAIT, MT_DATA);
3052 if (m)
3053 M_ALIGN(m, IGMP_LEADINGSPACE);
3054 }
3055 if (m == NULL) {
3056 CTR1(KTR_IGMPV3,
3057 "%s: m_get*() failed", __func__);
3058 return (-ENOMEM);
3059 }
3060 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3061 igmp_save_context(m, ifp);
3062 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3063 sizeof(struct igmp_grouprec)) /
3064 sizeof(in_addr_t);
3065 npbytes = 0;
3066 CTR1(KTR_IGMPV3,
3067 "%s: allocated new packet", __func__);
3068 }
3069 /*
3070 * Append the IGMP group record header to the
3071 * current packet's data area.
3072 * Recalculate pointer to free space for next
3073 * group record, in case m_append() allocated
3074 * a new mbuf or cluster.
3075 */
3076 memset(&ig, 0, sizeof(ig));
3077 ig.ig_group = inm->inm_addr;
3078 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3079 if (m != m0)
3080 m_freem(m);
3081 CTR1(KTR_IGMPV3,
3082 "%s: m_append() failed", __func__);
3083 return (-ENOMEM);
3084 }
3085 npbytes += sizeof(struct igmp_grouprec);
3086 if (m != m0) {
3087 				/* new packet; offset in chain */
3088 md = m_getptr(m, npbytes -
3089 sizeof(struct igmp_grouprec), &off);
3090 pig = (struct igmp_grouprec *)(mtod(md,
3091 uint8_t *) + off);
3092 } else {
3093 /* current packet; offset from last append */
3094 md = m_last(m);
3095 pig = (struct igmp_grouprec *)(mtod(md,
3096 uint8_t *) + md->m_len -
3097 sizeof(struct igmp_grouprec));
3098 }
3099 /*
3100 * Begin walking the tree for this record type
3101 * pass, or continue from where we left off
3102 * previously if we had to allocate a new packet.
3103 * Only report deltas in-mode at t1.
3104 * We need not report included sources as allowed
3105 * if we are in inclusive mode on the group,
3106 * however the converse is not true.
3107 */
3108 rsrcs = 0;
3109 if (nims == NULL)
3110 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3111 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3112 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3113 __func__, ims->ims_haddr);
3114 now = ims_get_mode(inm, ims, 1);
3115 then = ims_get_mode(inm, ims, 0);
3116 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3117 __func__, then, now);
3118 if (now == then) {
3119 CTR1(KTR_IGMPV3,
3120 "%s: skip unchanged", __func__);
3121 continue;
3122 }
3123 if (mode == MCAST_EXCLUDE &&
3124 now == MCAST_INCLUDE) {
3125 CTR1(KTR_IGMPV3,
3126 "%s: skip IN src on EX group",
3127 __func__);
3128 continue;
3129 }
3130 nrt = (rectype_t)now;
3131 if (nrt == REC_NONE)
3132 nrt = (rectype_t)(~mode & REC_FULL);
3133 if (schanged++ == 0) {
3134 crt = nrt;
3135 } else if (crt != nrt)
3136 continue;
3137 naddr = htonl(ims->ims_haddr);
3138 if (!m_append(m, sizeof(in_addr_t),
3139 (void *)&naddr)) {
3140 if (m != m0)
3141 m_freem(m);
3142 CTR1(KTR_IGMPV3,
3143 "%s: m_append() failed", __func__);
3144 return (-ENOMEM);
3145 }
3146 nallow += !!(crt == REC_ALLOW);
3147 nblock += !!(crt == REC_BLOCK);
3148 if (++rsrcs == m0srcs)
3149 break;
3150 }
3151 /*
3152 * If we did not append any tree nodes on this
3153 * pass, back out of allocations.
3154 */
3155 if (rsrcs == 0) {
3156 npbytes -= sizeof(struct igmp_grouprec);
3157 if (m != m0) {
3158 CTR1(KTR_IGMPV3,
3159 "%s: m_free(m)", __func__);
3160 m_freem(m);
3161 } else {
3162 CTR1(KTR_IGMPV3,
3163 "%s: m_adj(m, -ig)", __func__);
3164 m_adj(m, -((int)sizeof(
3165 struct igmp_grouprec)));
3166 }
3167 continue;
3168 }
3169 npbytes += (rsrcs * sizeof(in_addr_t));
3170 if (crt == REC_ALLOW)
3171 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3172 else if (crt == REC_BLOCK)
3173 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3174 pig->ig_numsrc = htons(rsrcs);
3175 /*
3176 * Count the new group record, and enqueue this
3177 * packet if it wasn't already queued.
3178 */
3179 m->m_pkthdr.PH_vt.vt_nrecs++;
3180 if (m != m0)
3181 mbufq_enqueue(mq, m);
3182 nbytes += npbytes;
3183 } while (nims != NULL);
3184 drt |= crt;
3185 crt = (~crt & REC_FULL);
3186 }
3187
3188 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3189 nallow, nblock);
3190
3191 return (nbytes);
3192 }
3193
3194 static int
3195 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3196 {
3197 struct mbufq *gq;
3198 struct mbuf *m; /* pending state-change */
3199 struct mbuf *m0; /* copy of pending state-change */
3200 struct mbuf *mt; /* last state-change in packet */
3201 int docopy, domerge;
3202 u_int recslen;
3203
3204 docopy = 0;
3205 domerge = 0;
3206 recslen = 0;
3207
3208 IN_MULTI_LIST_LOCK_ASSERT();
3209 IGMP_LOCK_ASSERT();
3210
3211 /*
3212 * If there are further pending retransmissions, make a writable
3213 * copy of each queued state-change message before merging.
3214 */
3215 if (inm->inm_scrv > 0)
3216 docopy = 1;
3217
3218 gq = &inm->inm_scq;
3219 #ifdef KTR
3220 if (mbufq_first(gq) == NULL) {
3221 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3222 __func__, inm);
3223 }
3224 #endif
3225
3226 m = mbufq_first(gq);
3227 while (m != NULL) {
3228 /*
3229 * Only merge the report into the current packet if
3230 * there is sufficient space to do so; an IGMPv3 report
3231 * packet may only contain 65,535 group records.
3232 		 * Always use a simple mbuf chain concatenation to do this,
3233 * as large state changes for single groups may have
3234 * allocated clusters.
3235 */
3236 domerge = 0;
3237 mt = mbufq_last(scq);
3238 if (mt != NULL) {
3239 recslen = m_length(m, NULL);
3240
3241 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3242 m->m_pkthdr.PH_vt.vt_nrecs <=
3243 IGMP_V3_REPORT_MAXRECS) &&
3244 (mt->m_pkthdr.len + recslen <=
3245 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3246 domerge = 1;
3247 }
3248
3249 if (!domerge && mbufq_full(gq)) {
3250 CTR2(KTR_IGMPV3,
3251 "%s: outbound queue full, skipping whole packet %p",
3252 __func__, m);
3253 mt = m->m_nextpkt;
3254 if (!docopy)
3255 m_freem(m);
3256 m = mt;
3257 continue;
3258 }
3259
3260 if (!docopy) {
3261 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3262 m0 = mbufq_dequeue(gq);
3263 m = m0->m_nextpkt;
3264 } else {
3265 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3266 m0 = m_dup(m, M_NOWAIT);
3267 if (m0 == NULL)
3268 return (ENOMEM);
3269 m0->m_nextpkt = NULL;
3270 m = m->m_nextpkt;
3271 }
3272
3273 if (!domerge) {
3274 			CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p",
3275 __func__, m0, scq);
3276 mbufq_enqueue(scq, m0);
3277 } else {
3278 struct mbuf *mtl; /* last mbuf of packet mt */
3279
3280 			CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p",
3281 __func__, m0, mt);
3282
3283 mtl = m_last(mt);
3284 m0->m_flags &= ~M_PKTHDR;
3285 mt->m_pkthdr.len += recslen;
3286 mt->m_pkthdr.PH_vt.vt_nrecs +=
3287 m0->m_pkthdr.PH_vt.vt_nrecs;
3288
3289 mtl->m_next = m0;
3290 }
3291 }
3292
3293 return (0);
3294 }
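
/*
 * Worked example of the merge test above: assuming IGMP_LEADINGSPACE
 * covers the prepended IP header, Router Alert option and IGMPv3
 * report header (20 + 4 + 8 = 32 bytes), a 1500-byte MTU leaves 1468
 * bytes of group-record payload per report, and the combined
 * vt_nrecs may not exceed IGMP_V3_REPORT_MAXRECS (65,535 records).
 */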
3295
3296 /*
3297 * Respond to a pending IGMPv3 General Query.
3298 */
3299 static void
3300 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3301 {
3302 struct ifmultiaddr *ifma;
3303 struct ifnet *ifp;
3304 struct in_multi *inm;
3305 int retval __unused, loop;
3306
3307 IN_MULTI_LIST_LOCK_ASSERT();
3308 IGMP_LOCK_ASSERT();
3309
3310 KASSERT(igi->igi_version == IGMP_VERSION_3,
3311 ("%s: called when version %d", __func__, igi->igi_version));
3312
3313 /*
3314 * Check that there are some packets queued. If so, send them first.
3315 	 * For a large number of groups the reply to a general query can
3316 	 * take many packets; we should finish sending them before starting
3317 	 * to queue the new reply.
3318 */
3319 if (mbufq_len(&igi->igi_gq) != 0)
3320 goto send;
3321
3322 ifp = igi->igi_ifp;
3323
3324 IF_ADDR_RLOCK(ifp);
3325 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3326 if (ifma->ifma_addr->sa_family != AF_INET ||
3327 ifma->ifma_protospec == NULL)
3328 continue;
3329
3330 inm = (struct in_multi *)ifma->ifma_protospec;
3331 KASSERT(ifp == inm->inm_ifp,
3332 ("%s: inconsistent ifp", __func__));
3333
3334 switch (inm->inm_state) {
3335 case IGMP_NOT_MEMBER:
3336 case IGMP_SILENT_MEMBER:
3337 break;
3338 case IGMP_REPORTING_MEMBER:
3339 case IGMP_IDLE_MEMBER:
3340 case IGMP_LAZY_MEMBER:
3341 case IGMP_SLEEPING_MEMBER:
3342 case IGMP_AWAKENING_MEMBER:
3343 inm->inm_state = IGMP_REPORTING_MEMBER;
3344 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3345 inm, 0, 0, 0);
3346 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3347 __func__, retval);
3348 break;
3349 case IGMP_G_QUERY_PENDING_MEMBER:
3350 case IGMP_SG_QUERY_PENDING_MEMBER:
3351 case IGMP_LEAVING_MEMBER:
3352 break;
3353 }
3354 }
3355 IF_ADDR_RUNLOCK(ifp);
3356
3357 send:
3358 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3359 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3360
3361 /*
3362 * Slew transmission of bursts over 500ms intervals.
3363 */
3364 if (mbufq_first(&igi->igi_gq) != NULL) {
3365 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3366 IGMP_RESPONSE_BURST_INTERVAL);
3367 V_interface_timers_running = 1;
3368 }
3369 }
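
/*
 * Worked example of the slew above: assuming
 * IGMP_RESPONSE_BURST_INTERVAL is PR_FASTHZ / 2 (2 fast ticks, or
 * 500ms) and IGMP_RANDOM_DELAY(x) expands to (random() % (x) + 1),
 * the next burst of at most IGMP_MAX_RESPONSE_BURST packets is
 * dispatched 1 + [1..2] = 2..3 fast-timeout ticks (~400-600ms)
 * after this one.
 */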
3370
3371 /*
3372 * Transmit the next pending IGMP message in the output queue.
3373 *
3374 * We get called from netisr_processqueue(). A mutex private to igmpoq
3375 * will be acquired and released around this routine.
3376 *
3377 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3378 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3379 * a link and uses a link-scope multicast address.
3380 */
3381 static void
3382 igmp_intr(struct mbuf *m)
3383 {
3384 struct ip_moptions imo;
3385 struct ifnet *ifp;
3386 struct mbuf *ipopts, *m0;
3387 int error;
3388 uint32_t ifindex;
3389
3390 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3391
3392 /*
3393 * Set VNET image pointer from enqueued mbuf chain
3394 * before doing anything else. Whilst we use interface
3395 * indexes to guard against interface detach, they are
3396 * unique to each VIMAGE and must be retrieved.
3397 */
3398 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3399 ifindex = igmp_restore_context(m);
3400
3401 /*
3402 * Check if the ifnet still exists. This limits the scope of
3403 * any race in the absence of a global ifp lock for low cost
3404 * (an array lookup).
3405 */
3406 ifp = ifnet_byindex(ifindex);
3407 if (ifp == NULL) {
3408 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3409 __func__, m, ifindex);
3410 m_freem(m);
3411 IPSTAT_INC(ips_noroute);
3412 goto out;
3413 }
3414
3415 ipopts = V_igmp_sendra ? m_raopt : NULL;
3416
3417 imo.imo_multicast_ttl = 1;
3418 imo.imo_multicast_vif = -1;
3419 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3420
3421 /*
3422 * If the user requested that IGMP traffic be explicitly
3423 * redirected to the loopback interface (e.g. they are running a
3424 * MANET interface and the routing protocol needs to see the
3425 * updates), handle this now.
3426 */
3427 if (m->m_flags & M_IGMP_LOOP)
3428 imo.imo_multicast_ifp = V_loif;
3429 else
3430 imo.imo_multicast_ifp = ifp;
3431
3432 if (m->m_flags & M_IGMPV2) {
3433 m0 = m;
3434 } else {
3435 m0 = igmp_v3_encap_report(ifp, m);
3436 if (m0 == NULL) {
3437 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3438 m_freem(m);
3439 IPSTAT_INC(ips_odropped);
3440 goto out;
3441 }
3442 }
3443
3444 igmp_scrub_context(m0);
3445 m_clrprotoflags(m);
3446 m0->m_pkthdr.rcvif = V_loif;
3447 #ifdef MAC
3448 mac_netinet_igmp_send(ifp, m0);
3449 #endif
3450 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3451 if (error) {
3452 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3453 goto out;
3454 }
3455
3456 IGMPSTAT_INC(igps_snd_reports);
3457
3458 out:
3459 /*
3460 * We must restore the existing vnet pointer before
3461 * continuing as we are run from netisr context.
3462 */
3463 CURVNET_RESTORE();
3464 }
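
/*
 * Minimal sketch (not compiled) of the context helpers used above,
 * consistent with the CURVNET_SET() on PH_loc.ptr in igmp_intr();
 * this is not the file's own definition.  The enqueueing side
 * stashes the vnet pointer and interface index in the mbuf packet
 * header, and the netisr side retrieves them.
 */
#if 0
static __inline void
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
	m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif
	m->m_pkthdr.flowid = ifp->if_index;
}

static __inline uint32_t
igmp_restore_context(struct mbuf *m)
{

	return (m->m_pkthdr.flowid);
}
#endif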
3465
3466 /*
3467 * Encapsulate an IGMPv3 report.
3468 *
3469 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3470 * chain has already had its IP/IGMPv3 header prepended. In this case
3471 * the function will not attempt to prepend; the lengths and checksums
3472 * will however be re-computed.
3473 *
3474 * Returns a pointer to the new mbuf chain head, or NULL if the
3475 * allocation failed.
3476 */
3477 static struct mbuf *
3478 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3479 {
3480 struct rm_priotracker in_ifa_tracker;
3481 struct igmp_report *igmp;
3482 struct ip *ip;
3483 int hdrlen, igmpreclen;
3484
3485 KASSERT((m->m_flags & M_PKTHDR),
3486 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3487
3488 igmpreclen = m_length(m, NULL);
3489 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3490
3491 if (m->m_flags & M_IGMPV3_HDR) {
3492 igmpreclen -= hdrlen;
3493 } else {
3494 M_PREPEND(m, hdrlen, M_NOWAIT);
3495 if (m == NULL)
3496 return (NULL);
3497 m->m_flags |= M_IGMPV3_HDR;
3498 }
3499
3500 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3501
3502 m->m_data += sizeof(struct ip);
3503 m->m_len -= sizeof(struct ip);
3504
3505 igmp = mtod(m, struct igmp_report *);
3506 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3507 igmp->ir_rsv1 = 0;
3508 igmp->ir_rsv2 = 0;
3509 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3510 igmp->ir_cksum = 0;
3511 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3512 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3513
3514 m->m_data -= sizeof(struct ip);
3515 m->m_len += sizeof(struct ip);
3516
3517 ip = mtod(m, struct ip *);
3518 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3519 ip->ip_len = htons(hdrlen + igmpreclen);
3520 ip->ip_off = htons(IP_DF);
3521 ip->ip_p = IPPROTO_IGMP;
3522 ip->ip_sum = 0;
3523
3524 ip->ip_src.s_addr = INADDR_ANY;
3525
3526 if (m->m_flags & M_IGMP_LOOP) {
3527 struct in_ifaddr *ia;
3528
3529 NET_EPOCH_ENTER();
3530 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
3531 if (ia != NULL)
3532 ip->ip_src = ia->ia_addr.sin_addr;
3533 NET_EPOCH_EXIT();
3534 }
3535
3536 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3537
3538 return (m);
3539 }
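
/*
 * Worked example of the encapsulation above: hdrlen is
 * sizeof(struct ip) + sizeof(struct igmp_report) = 20 + 8 = 28
 * bytes, so a report carrying igmpreclen bytes of group records is
 * emitted with ip_len = 28 + igmpreclen, IP_DF set, precedence
 * Internetwork Control, TTL 1 (set by igmp_intr() above) and
 * destination 224.0.0.22 (INADDR_ALLRPTS_GROUP).
 */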
3540
3541 #ifdef KTR
3542 static char *
3543 igmp_rec_type_to_str(const int type)
3544 {
3545
3546 switch (type) {
3547 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3548 return "TO_EX";
3549 break;
3550 case IGMP_CHANGE_TO_INCLUDE_MODE:
3551 return "TO_IN";
3552 break;
3553 case IGMP_MODE_IS_EXCLUDE:
3554 return "MODE_EX";
3555 break;
3556 case IGMP_MODE_IS_INCLUDE:
3557 return "MODE_IN";
3558 break;
3559 case IGMP_ALLOW_NEW_SOURCES:
3560 return "ALLOW_NEW";
3561 break;
3562 case IGMP_BLOCK_OLD_SOURCES:
3563 return "BLOCK_OLD";
3564 break;
3565 default:
3566 break;
3567 }
3568 return "unknown";
3569 }
3570 #endif
3571
3572 #ifdef VIMAGE
3573 static void
3574 vnet_igmp_init(const void *unused __unused)
3575 {
3576
3577 netisr_register_vnet(&igmp_nh);
3578 }
3579 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3580 vnet_igmp_init, NULL);
3581
3582 static void
3583 vnet_igmp_uninit(const void *unused __unused)
3584 {
3585
3586 /* This can happen when we shutdown the entire network stack. */
3587 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3588
3589 netisr_unregister_vnet(&igmp_nh);
3590 }
3591 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3592 vnet_igmp_uninit, NULL);
3593 #endif
3594
3595 #ifdef DDB
3596 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3597 {
3598 struct igmp_ifsoftc *igi, *tigi;
3599 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3600
3601 if (!have_addr) {
3602 db_printf("usage: show igi_list <addr>\n");
3603 return;
3604 }
3605 igi_head = (struct _igi_list *)addr;
3606
3607 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3608 db_printf("igmp_ifsoftc %p:\n", igi);
3609 db_printf(" ifp %p\n", igi->igi_ifp);
3610 db_printf(" version %u\n", igi->igi_version);
3611 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3612 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3613 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3614 db_printf(" flags %#x\n", igi->igi_flags);
3615 db_printf(" rv %u\n", igi->igi_rv);
3616 db_printf(" qi %u\n", igi->igi_qi);
3617 db_printf(" qri %u\n", igi->igi_qri);
3618 db_printf(" uri %u\n", igi->igi_uri);
3619 /* struct mbufq igi_gq; */
3620 db_printf("\n");
3621 }
3622 }
3623 #endif
3624
3625 static int
3626 igmp_modevent(module_t mod, int type, void *unused __unused)
3627 {
3628
3629 switch (type) {
3630 case MOD_LOAD:
3631 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3632 IGMP_LOCK_INIT();
3633 m_raopt = igmp_ra_alloc();
3634 netisr_register(&igmp_nh);
3635 break;
3636 case MOD_UNLOAD:
3637 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3638 netisr_unregister(&igmp_nh);
3639 m_free(m_raopt);
3640 m_raopt = NULL;
3641 IGMP_LOCK_DESTROY();
3642 break;
3643 default:
3644 return (EOPNOTSUPP);
3645 }
3646 return (0);
3647 }
3648
3649 static moduledata_t igmp_mod = {
3650 "igmp",
3651 igmp_modevent,
3652 0
3653 };
3654 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);