/*-
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */

/*
 * Internet Group Management Protocol (IGMP) routines.
 * [RFC1112, RFC2236, RFC3376]
 *
 * Written by Steve Deering, Stanford, May 1988.
 * Modified by Rosen Sharma, Stanford, Aug 1994.
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
 *
 * MULTICAST Revision: 3.5.1.4
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/netinet/igmp.c 302054 2016-06-21 13:48:49Z bz $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/condvar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/igmp.h>
#include <netinet/igmp_var.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_IGMPV3
#define	KTR_IGMPV3 KTR_INET
#endif

static struct igmp_ifsoftc *
		igi_alloc_locked(struct ifnet *);
static void	igi_delete_locked(const struct ifnet *);
static void	igmp_dispatch_queue(struct mbufq *, int, const int);
static void	igmp_fasttimo_vnet(void);
static void	igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_handle_state_change(struct in_multi *,
		    struct igmp_ifsoftc *);
static int	igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_input_v1_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v2_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v3_query(struct ifnet *, const struct ip *,
		    /*const*/ struct igmpv3 *);
static int	igmp_input_v3_group_query(struct in_multi *,
		    struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
static int	igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static int	igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static void	igmp_intr(struct mbuf *);
static int	igmp_isgroupreported(const struct in_addr);
static struct mbuf *
		igmp_ra_alloc(void);
#ifdef KTR
static char *	igmp_rec_type_to_str(const int);
#endif
static void	igmp_set_version(struct igmp_ifsoftc *, const int);
static void	igmp_slowtimo_vnet(void);
static int	igmp_v1v2_queue_report(struct in_multi *, const int);
static void	igmp_v1v2_process_group_timer(struct in_multi *, const int);
static void	igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
static void	igmp_v2_update_group(struct in_multi *, const int);
static void	igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
static void	igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
static struct mbuf *
		igmp_v3_encap_report(struct ifnet *, struct mbuf *);
static int	igmp_v3_enqueue_group_record(struct mbufq *,
		    struct in_multi *, const int, const int, const int);
static int	igmp_v3_enqueue_filter_change(struct mbufq *,
		    struct in_multi *);
static void	igmp_v3_process_group_timers(struct igmp_ifsoftc *,
		    struct mbufq *, struct mbufq *, struct in_multi *,
		    const int);
static int	igmp_v3_merge_state_changes(struct in_multi *,
		    struct mbufq *);
static void	igmp_v3_suppress_group_record(struct in_multi *);
static int	sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);

static const struct netisr_handler igmp_nh = {
	.nh_name = "igmp",
	.nh_handler = igmp_intr,
	.nh_proto = NETISR_IGMP,
	.nh_policy = NETISR_POLICY_SOURCE,
};

/*
 * System-wide globals.
 *
 * Unlocked access to these is OK, except for the global IGMP output
 * queue.  The IGMP subsystem lock ends up being system-wide for the moment,
 * because all VIMAGEs have to share a global output queue, as netisrs
 * themselves are not virtualized.
 *
 * Locking:
 *  * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * All output is delegated to the netisr.
 *    Now that Giant has been eliminated, the netisr may be inlined.
 *  * IN_MULTI_LOCK covers in_multi.
 *  * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
 *    including the output queue.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *  * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
 *    therefore it is not refcounted.
 *    We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
 *
 * Reference counting
 *  * IGMP acquires its own reference every time an in_multi is passed to
 *    it and the group is being joined for the first time.
 *  * IGMP releases its reference(s) on in_multi in a deferred way,
 *    because the operations which process the release run as part of
 *    a loop whose control variables are directly affected by the release
 *    (that, and not recursing on the IF_ADDR_LOCK).
 *
 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
 * to a vnet in ifp->if_vnet.
 *
 * SMPng: XXX We may potentially race operations on ifma_protospec.
 * The problem is that we currently lack a clean way of taking the
 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
 * as anything which modifies ifma needs to be covered by that lock.
 * So check for ifma_protospec being NULL before proceeding.
 */
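
/*
 * Illustrative sketch (not part of the original source): a path that
 * must modify both multicast group state and per-interface IGMP state
 * takes the locks in the order documented above, e.g.:
 *
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	... update in_multi and igmp_ifsoftc state ...
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 */
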
struct mtx igmp_mtx;

struct mbuf *m_raopt;			/* Router Alert option */
static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");

/*
 * VIMAGE-wide globals.
 *
 * The IGMPv3 timers themselves need to run per-image, however,
 * protosw timers run globally (see tcp).
 * An ifnet can only be in one vimage at a time, and the loopback
 * ifnet, loif, is itself virtualized.
 * It would otherwise be possible to seriously hose IGMP state,
 * and create inconsistencies in upstream multicast routing, if you have
 * multiple VIMAGEs running on the same link joining different multicast
 * groups, UNLESS the "primary IP address" is different.  This is because
 * IGMP for IPv4 does not force link-local addresses to be used for each
 * node, unlike MLD for IPv6.
 * Obviously the IGMPv3 per-interface state has per-vimage granularity
 * also as a result.
 *
 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
 * policy to control the address used by IGMP on the link.
 */
static VNET_DEFINE(int, interface_timers_running);	/* IGMPv3 general
							 * query response */
static VNET_DEFINE(int, state_change_timers_running);	/* IGMPv3 state-change
							 * retransmit */
static VNET_DEFINE(int, current_state_timers_running);	/* IGMPv1/v2 host
							 * report; IGMPv3 g/sg
							 * query response */

#define	V_interface_timers_running	VNET(interface_timers_running)
#define	V_state_change_timers_running	VNET(state_change_timers_running)
#define	V_current_state_timers_running	VNET(current_state_timers_running)

static VNET_DEFINE(LIST_HEAD(, igmp_ifsoftc), igi_head) =
    LIST_HEAD_INITIALIZER(igi_head);
static VNET_DEFINE(struct igmpstat, igmpstat) = {
	.igps_version = IGPS_VERSION_3,
	.igps_len = sizeof(struct igmpstat),
};
static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0};

#define	V_igi_head			VNET(igi_head)
#define	V_igmpstat			VNET(igmpstat)
#define	V_igmp_gsrdelay			VNET(igmp_gsrdelay)

static VNET_DEFINE(int, igmp_recvifkludge) = 1;
static VNET_DEFINE(int, igmp_sendra) = 1;
static VNET_DEFINE(int, igmp_sendlocal) = 1;
static VNET_DEFINE(int, igmp_v1enable) = 1;
static VNET_DEFINE(int, igmp_v2enable) = 1;
static VNET_DEFINE(int, igmp_legacysupp);
static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3;

#define	V_igmp_recvifkludge		VNET(igmp_recvifkludge)
#define	V_igmp_sendra			VNET(igmp_sendra)
#define	V_igmp_sendlocal		VNET(igmp_sendlocal)
#define	V_igmp_v1enable			VNET(igmp_v1enable)
#define	V_igmp_v2enable			VNET(igmp_v2enable)
#define	V_igmp_legacysupp		VNET(igmp_legacysupp)
#define	V_igmp_default_version		VNET(igmp_default_version)

/*
 * Virtualized sysctls.
 */
SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmpstat), igmpstat, "");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_recvifkludge), 0,
    "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendra), 0,
    "Send IP Router Alert option in IGMPv2/v3 messages");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendlocal), 0,
    "Send IGMP membership reports for 224.0.0.0/24 groups");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v1enable), 0,
    "Enable backwards compatibility with IGMPv1");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v2enable), 0,
    "Enable backwards compatibility with IGMPv2");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_legacysupp), 0,
    "Allow v1/v2 reports to suppress v3 group responses");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
    "Default version of IGMP to run on each interface");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
    "Rate limit for IGMPv3 Group-and-Source queries in seconds");

/*
 * Non-virtualized sysctls.
 */
static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
    "Per-interface IGMPv3 state");

static __inline void
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
	m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
	m->m_pkthdr.flowid = ifp->if_index;
}

static __inline void
igmp_scrub_context(struct mbuf *m)
{

	m->m_pkthdr.PH_loc.ptr = NULL;
	m->m_pkthdr.flowid = 0;
}

#ifdef KTR
static __inline char *
inet_ntoa_haddr(in_addr_t haddr)
{
	struct in_addr ia;

	ia.s_addr = htonl(haddr);
	return (inet_ntoa(ia));
}
#endif

/*
 * Restore context from a queued IGMP output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
igmp_restore_context(struct mbuf *m)
{

#ifdef notyet
#if defined(VIMAGE) && defined(INVARIANTS)
	KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
	    ("%s: called when curvnet was not restored", __func__));
#endif
#endif
	return (m->m_pkthdr.flowid);
}
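
/*
 * Illustrative round trip (a sketch, not original code): the sender
 * stamps an outbound mbuf before handing it to the netisr, and the
 * handler later recovers the transmit interface by index rather than
 * carrying an ifnet pointer which could go stale while queued:
 *
 *	igmp_save_context(m, ifp);		(before netisr_dispatch)
 *	...
 *	ifindex = igmp_restore_context(m);	(in the netisr handler)
 *	ifp = ifnet_byindex(ifindex);
 */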

/*
 * Retrieve or set default IGMP version.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
{
	int error;
	int new;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	IGMP_LOCK();

	new = V_igmp_default_version;

	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
	    V_igmp_default_version, new);

	V_igmp_default_version = new;

out_locked:
	IGMP_UNLOCK();
	return (error);
}
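
/*
 * Example (illustrative): the handler above backs the
 * net.inet.igmp.default_version OID, so the default may be changed
 * from userland with, e.g.:
 *
 *	sysctl net.inet.igmp.default_version=2
 *
 * Values outside IGMP_VERSION_1..IGMP_VERSION_3 are rejected with EINVAL.
 */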

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
{
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	IGMP_LOCK();

	i = V_igmp_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
	    V_igmp_gsrdelay.tv_sec, i);
	V_igmp_gsrdelay.tv_sec = i;

out_locked:
	IGMP_UNLOCK();
	return (error);
}
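
/*
 * Example (illustrative): the group-and-source query rate limit may
 * likewise be tuned from userland, e.g.:
 *
 *	sysctl net.inet.igmp.gsrdelay=10
 *
 * permitting at most one G/SG query response per group every 10 seconds
 * (see the ratecheck() call in igmp_input_v3_query()).
 */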

/*
 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
{
	int			*name;
	int			 error;
	u_int			 namelen;
	struct ifnet		*ifp;
	struct igmp_ifsoftc	*igi;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
	if (error)
		return (error);

	IN_MULTI_LOCK();
	IGMP_LOCK();

	if (name[0] <= 0 || name[0] > V_if_index) {
		error = ENOENT;
		goto out_locked;
	}

	error = ENOENT;

	ifp = ifnet_byindex(name[0]);
	if (ifp == NULL)
		goto out_locked;

	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		if (ifp == igi->igi_ifp) {
			struct igmp_ifinfo info;

			info.igi_version = igi->igi_version;
			info.igi_v1_timer = igi->igi_v1_timer;
			info.igi_v2_timer = igi->igi_v2_timer;
			info.igi_v3_timer = igi->igi_v3_timer;
			info.igi_flags = igi->igi_flags;
			info.igi_rv = igi->igi_rv;
			info.igi_qi = igi->igi_qi;
			info.igi_qri = igi->igi_qri;
			info.igi_uri = igi->igi_uri;
			error = SYSCTL_OUT(req, &info, sizeof(info));
			break;
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();
	return (error);
}

/*
 * Dispatch an entire queue of pending packet chains
 * using the netisr.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
{
	struct mbuf *m;

	while ((m = mbufq_dequeue(mq)) != NULL) {
		CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
		if (loop)
			m->m_flags |= M_IGMP_LOOP;
		netisr_dispatch(NETISR_IGMP, m);
		if (--limit == 0)
			break;
	}
}

/*
 * Filter outgoing IGMP report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
 * disabled for all groups in the 224.0.0.0/24 link-local scope.  However,
 * this may break certain IGMP snooping switches which rely on the old
 * report behaviour.
 *
 * Return zero if the given group is one for which IGMP reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline int
igmp_isgroupreported(const struct in_addr addr)
{

	if (in_allhosts(addr) ||
	    ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
		return (0);

	return (1);
}
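
/*
 * Examples (illustrative): 224.0.0.1 (all-hosts) is never reported;
 * 224.0.0.22 is reported only if net.inet.igmp.sendlocal is non-zero,
 * as it falls within the 224.0.0.0/24 link-local scope; an ordinary
 * group such as 239.255.1.1 is always reported.
 */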

/*
 * Construct a Router Alert option to use in outgoing packets.
 */
static struct mbuf *
igmp_ra_alloc(void)
{
	struct mbuf	*m;
	struct ipoption	*p;

	m = m_get(M_WAITOK, MT_DATA);
	p = mtod(m, struct ipoption *);
	p->ipopt_dst.s_addr = INADDR_ANY;
	p->ipopt_list[0] = IPOPT_RA;	/* Router Alert Option */
	p->ipopt_list[1] = 0x04;	/* 4 bytes long */
	p->ipopt_list[2] = IPOPT_EOL;	/* End of IP option list */
	p->ipopt_list[3] = 0x00;	/* pad byte */
	m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];

	return (m);
}
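
/*
 * On the wire the option built above is 0x94 0x04 0x00 0x00: the
 * RFC 2113 Router Alert type (copied bit set, option number 20),
 * a length of 4, and a zero value, expressed here as IPOPT_EOL
 * plus a pad byte.
 */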

/*
 * Attach IGMP when PF_INET is attached to an interface.
 */
struct igmp_ifsoftc *
igmp_domifattach(struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();

	igi = igi_alloc_locked(ifp);
	if (!(ifp->if_flags & IFF_MULTICAST))
		igi->igi_flags |= IGIF_SILENT;

	IGMP_UNLOCK();

	return (igi);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct igmp_ifsoftc *
igi_alloc_locked(/*const*/ struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	IGMP_LOCK_ASSERT();

	igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
	if (igi == NULL)
		goto out;

	igi->igi_ifp = ifp;
	igi->igi_version = V_igmp_default_version;
	igi->igi_flags = 0;
	igi->igi_rv = IGMP_RV_INIT;
	igi->igi_qi = IGMP_QI_INIT;
	igi->igi_qri = IGMP_QRI_INIT;
	igi->igi_uri = IGMP_URI_INIT;
	SLIST_INIT(&igi->igi_relinmhead);
	mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);

	LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);

	CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
	    ifp, ifp->if_xname);

out:
	return (igi);
}

/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 *
 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
 * XXX This is also bitten by unlocked ifma_protospec access.
 */
void
igmp_ifdetach(struct ifnet *ifp)
{
	struct igmp_ifsoftc	*igi;
	struct ifmultiaddr	*ifma;
	struct in_multi		*inm, *tinm;

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
	    ifp->if_xname);

	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	if (igi->igi_version == IGMP_VERSION_3) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET ||
			    ifma->ifma_protospec == NULL)
				continue;
#if 0
			KASSERT(ifma->ifma_protospec != NULL,
			    ("%s: ifma_protospec is NULL", __func__));
#endif
			inm = (struct in_multi *)ifma->ifma_protospec;
			if (inm->inm_state == IGMP_LEAVING_MEMBER) {
				SLIST_INSERT_HEAD(&igi->igi_relinmhead,
				    inm, inm_nrele);
			}
			inm_clear_recorded(inm);
		}
		IF_ADDR_RUNLOCK(ifp);
		/*
		 * Free the in_multi reference(s) for this IGMP lifecycle.
		 */
		SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
		    tinm) {
			SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
			inm_release_locked(inm);
		}
	}

	IGMP_UNLOCK();
}

/*
 * Hook for domifdetach.
 */
void
igmp_domifdetach(struct ifnet *ifp)
{

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();
	igi_delete_locked(ifp);
	IGMP_UNLOCK();
}

static void
igi_delete_locked(const struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi, *tigi;

	CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK_ASSERT();

	LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
		if (igi->igi_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			mbufq_drain(&igi->igi_gq);

			LIST_REMOVE(igi, igi_link);

			KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
			    ("%s: there are dangling in_multi references",
			    __func__));

			free(igi, M_IGMP);
			return;
		}
	}
}

/*
 * Process a received IGMPv1 query.
 * Return non-zero if the message should be dropped.
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr	*ifma;
	struct igmp_ifsoftc	*igi;
	struct in_multi		*inm;

	/*
	 * IGMPv1 Host Membership Queries SHOULD always be addressed to
	 * 224.0.0.1.  They are always treated as General Queries.
	 * igmp_group is always ignored.  Do not drop it as a userland
	 * daemon may wish to see it.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
		IGMPSTAT_INC(igps_rcv_badqueries);
		return (0);
	}
	IGMPSTAT_INC(igps_rcv_gen_queries);

	IN_MULTI_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Switch to IGMPv1 host compatibility mode.
	 */
	igmp_set_version(igi, IGMP_VERSION_1);

	CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

	/*
	 * Start the timers in all of our group records
	 * for the interface on which the query arrived,
	 * except those which are already running.
	 */
	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		if (inm->inm_timer != 0)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(
			    IGMP_V1V2_MAX_RI * PR_FASTHZ);
			V_current_state_timers_running = 1;
			break;
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr	*ifma;
	struct igmp_ifsoftc	*igi;
	struct in_multi		*inm;
	int			 is_general_query;
	uint16_t		 timer;

	is_general_query = 0;

	/*
	 * Validate address fields upfront.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (in_nullhost(igmp->igmp_group)) {
		/*
		 * IGMPv2 General Query.
		 * If this was not sent to the all-hosts group, ignore it.
		 */
		if (!in_allhosts(ip->ip_dst))
			return (0);
		IGMPSTAT_INC(igps_rcv_gen_queries);
		is_general_query = 1;
	} else {
		/* IGMPv2 Group-Specific Query. */
		IGMPSTAT_INC(igps_rcv_group_queries);
	}

	IN_MULTI_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Ignore v2 query if in v1 Compatibility Mode.
	 */
	if (igi->igi_version == IGMP_VERSION_1)
		goto out_locked;

	igmp_set_version(igi, IGMP_VERSION_2);

	timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;
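
	/*
	 * Worked example (illustrative, assuming PR_FASTHZ is 5 fast
	 * ticks per second and IGMP_TIMER_SCALE is 10): a Max Resp Time
	 * code of 100, i.e. 10.0 seconds, yields
	 * timer = 100 * 5 / 10 = 50 fast ticks.
	 */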

	if (is_general_query) {
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET ||
			    ifma->ifma_protospec == NULL)
				continue;
			inm = (struct in_multi *)ifma->ifma_protospec;
			igmp_v2_update_group(inm, timer);
		}
		IF_ADDR_RUNLOCK(ifp);
	} else {
		/*
		 * Group-specific IGMPv2 query, we need only
		 * look up the single group to process it.
		 */
		inm = inm_lookup(ifp, igmp->igmp_group);
		if (inm != NULL) {
			CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)",
			    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
			igmp_v2_update_group(inm, timer);
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Update the report timer on a group in response to an IGMPv2 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to IGMPv3.  If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike IGMPv3, the delay per group should be jittered
 * to avoid bursts of IGMPv2 reports.
 */
static void
igmp_v2_update_group(struct in_multi *inm, const int timer)
{

	CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__,
	    inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer);

	IN_MULTI_LOCK_ASSERT();

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
		break;
	case IGMP_REPORTING_MEMBER:
		if (inm->inm_timer != 0 &&
		    inm->inm_timer <= timer) {
			CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
			    "skipping.", __func__);
			break;
		}
		/* FALLTHROUGH */
	case IGMP_SG_QUERY_PENDING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
		inm->inm_state = IGMP_REPORTING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		break;
	case IGMP_SLEEPING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
		inm->inm_state = IGMP_AWAKENING_MEMBER;
		break;
	case IGMP_LEAVING_MEMBER:
		break;
	}
}
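
/*
 * Example (illustrative, assuming IGMP_RANDOM_DELAY(x) returns a
 * uniform value in [1, x]): with timer = 50 fast ticks, each host
 * arms its report timer somewhere in (0, 10] seconds, spreading
 * IGMPv2 reports from multiple hosts rather than synchronizing them.
 */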

/*
 * Process a received IGMPv3 general, group-specific or
 * group-and-source-specific query.
 * Assumes m has already been pulled up to the full IGMP message length.
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
    /*const*/ struct igmpv3 *igmpv3)
{
	struct igmp_ifsoftc	*igi;
	struct in_multi		*inm;
	int			 is_general_query;
	uint32_t		 maxresp, nsrc, qqi;
	uint16_t		 timer;
	uint8_t			 qrv;

	is_general_query = 0;

	CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);

	maxresp = igmpv3->igmp_code;	/* in 1/10ths of a second */
	if (maxresp >= 128) {
		maxresp = IGMP_MANT(igmpv3->igmp_code) <<
		    (IGMP_EXP(igmpv3->igmp_code) + 3);
	}
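
	/*
	 * Illustrative decode (assuming IGMP_EXP() extracts bits 4-6 and
	 * IGMP_MANT() the low nibble of RFC 3376's exponential encoding):
	 * for igmp_code = 0x8A, exp = 0 and mant = 10, so the expression
	 * above yields maxresp = 10 << 3 = 80 tenths of a second.
	 */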

	/*
	 * Robustness must never be less than 2 for on-wire IGMPv3.
	 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
	 * an exception for interfaces whose IGMPv3 state changes
	 * are redirected to loopback (e.g. MANET).
	 */
	qrv = IGMP_QRV(igmpv3->igmp_misc);
	if (qrv < 2) {
		CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
		    qrv, IGMP_RV_INIT);
		qrv = IGMP_RV_INIT;
	}

	qqi = igmpv3->igmp_qqi;
	if (qqi >= 128) {
		qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
		    (IGMP_EXP(igmpv3->igmp_qqi) + 3);
	}

	timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Validate address fields and versions upfront before
	 * accepting v3 query.
	 * XXX SMPng: Unlocked access to igmpstat counters here.
	 */
	if (in_nullhost(igmpv3->igmp_group)) {
		/*
		 * IGMPv3 General Query.
		 *
		 * General Queries SHOULD be directed to 224.0.0.1.
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		IGMPSTAT_INC(igps_rcv_gen_queries);
		if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
			IGMPSTAT_INC(igps_rcv_badqueries);
			return (0);
		}
		is_general_query = 1;
	} else {
		/* Group or group-source specific query. */
		if (nsrc == 0)
			IGMPSTAT_INC(igps_rcv_group_queries);
		else
			IGMPSTAT_INC(igps_rcv_gsr_queries);
	}

	IN_MULTI_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Discard the v3 query if we're in Compatibility Mode.
	 * The RFC is not clearly worded on whether hosts need to stay in
	 * compatibility mode until the Old Version Querier Present
	 * timer expires.
	 */
	if (igi->igi_version != IGMP_VERSION_3) {
		CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
		    igi->igi_version, ifp, ifp->if_xname);
		goto out_locked;
	}

	igmp_set_version(igi, IGMP_VERSION_3);
	igi->igi_rv = qrv;
	igi->igi_qi = qqi;
	igi->igi_qri = maxresp;

	CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
	    maxresp);

	if (is_general_query) {
		/*
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
		    inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred.  Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	int	 retval;
	uint16_t nsrc;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
		break;
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources.  If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr	*ap;
		int			 i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0.  Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia, &in_ifa_tracker);
		if (ia != NULL) {
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
			ifa_free(&ia->ia_ifa);
		}
	}

	CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
	    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		igi = inm->inm_igi;
		if (igi == NULL) {
			KASSERT(igi != NULL,
			    ("%s: no igi for ifp %p", __func__, ifp));
			goto out_locked;
		}

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for %s on ifp %p(%s)",
			    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
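			/* FALLTHROUGH */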
		case IGMP_SLEEPING_MEMBER:
			inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_REPORTING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for %s on ifp %p(%s)",
			    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
			if (igi->igi_version == IGMP_VERSION_1)
				inm->inm_state = IGMP_LAZY_MEMBER;
			else if (igi->igi_version == IGMP_VERSION_2)
				inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct in_multi *inm;

	/*
	 * Make sure we don't hear our own membership report.  Fast
	 * leave requires knowing that we are the only member of a
	 * group.
	 */
	IFP_TO_IA(ifp, ia, &in_ifa_tracker);
	if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
		ifa_free(&ia->ia_ifa);
		return (0);
	}

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return (0);
	}

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0.  Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		if (ia != NULL)
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);

	CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)",
	    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);

	/*
	 * IGMPv2 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	IN_MULTI_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		igi = inm->inm_igi;
		KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv2 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
		case IGMP_SLEEPING_MEMBER:
			break;
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for %s on ifp %p(%s)",
			    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
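			/* FALLTHROUGH */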
		case IGMP_LAZY_MEMBER:
			inm->inm_state = IGMP_LAZY_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_UNLOCK();

	return (0);
}

int
igmp_input(struct mbuf **mp, int *offp, int proto)
{
	int iphlen;
	struct ifnet *ifp;
	struct igmp *igmp;
	struct ip *ip;
	struct mbuf *m;
	int igmplen;
	int minlen;
	int queryver;

	CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);

	m = *mp;
	ifp = m->m_pkthdr.rcvif;
	*mp = NULL;

	IGMPSTAT_INC(igps_rcv_total);

	ip = mtod(m, struct ip *);
	iphlen = *offp;
	igmplen = ntohs(ip->ip_len) - iphlen;

	/*
	 * Validate lengths.
	 */
	if (igmplen < IGMP_MINLEN) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	/*
	 * Always pullup to the minimum size for v1/v2 or v3
	 * to amortize calls to m_pullup().
	 */
	minlen = iphlen;
	if (igmplen >= IGMP_V3_QUERY_MINLEN)
		minlen += IGMP_V3_QUERY_MINLEN;
	else
		minlen += IGMP_MINLEN;
	if ((!M_WRITABLE(m) || m->m_len < minlen) &&
	    (m = m_pullup(m, minlen)) == NULL) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		return (IPPROTO_DONE);
	}
	ip = mtod(m, struct ip *);

	/*
	 * Validate checksum.
	 */
	m->m_data += iphlen;
	m->m_len -= iphlen;
	igmp = mtod(m, struct igmp *);
	if (in_cksum(m, igmplen)) {
		IGMPSTAT_INC(igps_rcv_badsum);
		m_freem(m);
		return (IPPROTO_DONE);
	}
	m->m_data -= iphlen;
	m->m_len += iphlen;

	/*
	 * IGMP control traffic is link-scope, and must have a TTL of 1.
	 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
	 * probe packets may come from beyond the LAN.
	 */
	if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
		IGMPSTAT_INC(igps_rcv_badttl);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	switch (igmp->igmp_type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		if (igmplen == IGMP_MINLEN) {
			if (igmp->igmp_code == 0)
				queryver = IGMP_VERSION_1;
			else
				queryver = IGMP_VERSION_2;
		} else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
			queryver = IGMP_VERSION_3;
		} else {
			IGMPSTAT_INC(igps_rcv_tooshort);
			m_freem(m);
			return (IPPROTO_DONE);
		}
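
		/*
		 * Query version disambiguation, as applied above:
		 *
		 *	8-byte query (IGMP_MINLEN), code == 0  -> IGMPv1
		 *	8-byte query (IGMP_MINLEN), code != 0  -> IGMPv2
		 *	IGMP_V3_QUERY_MINLEN (12) or longer    -> IGMPv3
		 */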

		switch (queryver) {
		case IGMP_VERSION_1:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v1enable)
				break;
			if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_2:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v2enable)
				break;
			if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_3: {
				struct igmpv3 *igmpv3;
				uint16_t igmpv3len;
				uint16_t nsrc;

				IGMPSTAT_INC(igps_rcv_v3_queries);
				igmpv3 = (struct igmpv3 *)igmp;
				/*
				 * Validate length based on source count.
				 */
				nsrc = ntohs(igmpv3->igmp_numsrc);
				if (nsrc * sizeof(in_addr_t) >
				    UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
					IGMPSTAT_INC(igps_rcv_tooshort);
					return (IPPROTO_DONE);
				}
				/*
				 * m_pullup() may modify m, so pullup in
				 * this scope.
				 */
				igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
				    sizeof(struct in_addr) * nsrc;
				if ((!M_WRITABLE(m) ||
				    m->m_len < igmpv3len) &&
				    (m = m_pullup(m, igmpv3len)) == NULL) {
					IGMPSTAT_INC(igps_rcv_tooshort);
					return (IPPROTO_DONE);
				}
				igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
				    + iphlen);
				if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
					m_freem(m);
					return (IPPROTO_DONE);
				}
			}
			break;
		}
		break;

	case IGMP_v1_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v1enable)
			break;
		if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v2_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v2enable)
			break;
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v3_HOST_MEMBERSHIP_REPORT:
		/*
		 * Hosts do not need to process IGMPv3 membership reports,
		 * as report suppression is no longer required.
		 */
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		break;

	default:
		break;
	}

	/*
	 * Pass all valid IGMP packets up to any process(es) listening on a
	 * raw IGMP socket.
	 */
	*mp = m;
	return (rip_input(mp, offp, proto));
}


/*
 * Fast timeout handler (global).
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
void
igmp_fasttimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		igmp_fasttimo_vnet();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Fast timeout handler (per-vnet).
 * Sends are shuffled off to a netisr to deal with Giant.
 *
 * VIMAGE: Assume caller has set up our curvnet.
 */
static void
igmp_fasttimo_vnet(void)
{
	struct mbufq		 scq;	/* State-change packets */
	struct mbufq		 qrq;	/* Query response packets */
	struct ifnet		*ifp;
	struct igmp_ifsoftc	*igi;
	struct ifmultiaddr	*ifma;
	struct in_multi		*inm;
	int			 loop, uri_fasthz;

	loop = 0;
	uri_fasthz = 0;

	/*
	 * Quick check to see if any work needs to be done, in order to
	 * minimize the overhead of fasttimo processing.
	 * SMPng: XXX Unlocked reads.
	 */
	if (!V_current_state_timers_running &&
	    !V_interface_timers_running &&
	    !V_state_change_timers_running)
		return;

	IN_MULTI_LOCK();
	IGMP_LOCK();

	/*
	 * IGMPv3 General Query response timer processing.
	 */
	if (V_interface_timers_running) {
		CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);

		V_interface_timers_running = 0;
		LIST_FOREACH(igi, &V_igi_head, igi_link) {
			if (igi->igi_v3_timer == 0) {
				/* Do nothing. */
			} else if (--igi->igi_v3_timer == 0) {
				igmp_v3_dispatch_general_query(igi);
			} else {
				V_interface_timers_running = 1;
			}
		}
	}

	if (!V_current_state_timers_running &&
	    !V_state_change_timers_running)
		goto out_locked;

	V_current_state_timers_running = 0;
	V_state_change_timers_running = 0;

	CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);

	/*
	 * IGMPv1/v2/v3 host report and state-change timer processing.
	 * Note: Processing a v3 group timer may remove a node.
	 */
	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		ifp = igi->igi_ifp;

		if (igi->igi_version == IGMP_VERSION_3) {
			loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
			uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
			    PR_FASTHZ);
			mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
			mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
		}

		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET ||
			    ifma->ifma_protospec == NULL)
				continue;
			inm = (struct in_multi *)ifma->ifma_protospec;
			switch (igi->igi_version) {
			case IGMP_VERSION_1:
			case IGMP_VERSION_2:
				igmp_v1v2_process_group_timer(inm,
				    igi->igi_version);
				break;
			case IGMP_VERSION_3:
				igmp_v3_process_group_timers(igi, &qrq,
				    &scq, inm, uri_fasthz);
				break;
			}
		}
		IF_ADDR_RUNLOCK(ifp);

		if (igi->igi_version == IGMP_VERSION_3) {
			struct in_multi		*tinm;

			igmp_dispatch_queue(&qrq, 0, loop);
			igmp_dispatch_queue(&scq, 0, loop);

			/*
			 * Free the in_multi reference(s) for this
			 * IGMP lifecycle.
			 */
			SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
			    inm_nrele, tinm) {
				SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
				    inm_nrele);
				inm_release_locked(inm);
			}
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();
}

/*
 * Update host report group timer for IGMPv1/v2.
 * Will update the global pending timer flags.
 */
static void
igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
{
	int report_timer_expired;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	if (inm->inm_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->inm_timer == 0) {
		report_timer_expired = 1;
	} else {
		V_current_state_timers_running = 1;
		return;
	}

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_AWAKENING_MEMBER:
		break;
	case IGMP_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->inm_state = IGMP_IDLE_MEMBER;
			(void)igmp_v1v2_queue_report(inm,
			    (version == IGMP_VERSION_2) ?
			     IGMP_v2_HOST_MEMBERSHIP_REPORT :
			     IGMP_v1_HOST_MEMBERSHIP_REPORT);
		}
		break;
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		break;
	}
}

/*
 * Update a group's timers for IGMPv3.
 * Will update the global pending timer flags.
 * Note: Unlocked read from igi.
 */
static void
igmp_v3_process_group_timers(struct igmp_ifsoftc *igi,
    struct mbufq *qrq, struct mbufq *scq,
    struct in_multi *inm, const int uri_fasthz)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from v1/v2 compatibility mode back to v3,
	 * a group record in REPORTING state may still have its group
	 * timer active.  This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the slow-timeout path.
	 */
	if (inm->inm_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->inm_timer == 0) {
		query_response_timer_expired = 1;
	} else {
		V_current_state_timers_running = 1;
	}

	if (inm->inm_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->inm_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;
	} else {
		V_state_change_timers_running = 1;
	}

	/* We are in fasttimo, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired)
		return;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
		break;
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			int retval;

			retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
			CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
			    __func__, retval);
			inm->inm_state = IGMP_REPORTING_MEMBER;
			/* XXX Clear recorded sources for next time. */
			inm_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case IGMP_REPORTING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->inm_scrv > 0) {
				inm->inm_sctimer = uri_fasthz;
				V_state_change_timers_running = 1;
			}
			/*
			 * Retransmit the previously computed state-change
			 * report.  If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void)igmp_v3_merge_state_changes(inm, scq);

			inm_commit(inm);
			CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
			    inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);

			/*
			 * If we are leaving the group for good, make sure
			 * we release IGMP's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->inm_state == IGMP_LEAVING_MEMBER &&
			    inm->inm_scrv == 0) {
				inm->inm_state = IGMP_NOT_MEMBER;
				SLIST_INSERT_HEAD(&igi->igi_relinmhead,
				    inm, inm_nrele);
			}
		}
		break;
	}
}
1922
1923
1924 /*
1925 * Suppress a group's pending response to a group or source/group query.
1926 *
1927 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1928 * Do NOT update ST1/ST0 as this operation merely suppresses
1929 * the currently pending group record.
1930 * Do NOT suppress the response to a general query. It is possible but
1931 * it would require adding another state or flag.
1932 */
1933 static void
1934 igmp_v3_suppress_group_record(struct in_multi *inm)
1935 {
1936
1937 IN_MULTI_LOCK_ASSERT();
1938
1939 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1940 ("%s: not IGMPv3 mode on link", __func__));
1941
1942 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1943 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1944 return;
1945
1946 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1947 inm_clear_recorded(inm);
1948
1949 inm->inm_timer = 0;
1950 inm->inm_state = IGMP_REPORTING_MEMBER;
1951 }
1952
1953 /*
1954 * Switch to a different IGMP version on the given interface,
1955 * as per Section 7.2.1.
1956 */
1957 static void
1958 igmp_set_version(struct igmp_ifsoftc *igi, const int version)
1959 {
1960 int old_version_timer;
1961
1962 IGMP_LOCK_ASSERT();
1963
1964 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1965 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1966
1967 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1968 /*
1969 * Compute the "Older Version Querier Present" timer as per
1970 * Section 8.12.
1971 */
1972 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1973 old_version_timer *= PR_SLOWHZ;
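/*
 * Example: with the RFC 3376 defaults (Robustness Variable 2,
 * Query Interval 125s, Query Response Interval 10s) this works
 * out to 2 * 125 + 10 = 260 seconds, converted to slowtimo
 * ticks by the PR_SLOWHZ multiplication above.
 */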
1974
1975 if (version == IGMP_VERSION_1) {
1976 igi->igi_v1_timer = old_version_timer;
1977 igi->igi_v2_timer = 0;
1978 } else if (version == IGMP_VERSION_2) {
1979 igi->igi_v1_timer = 0;
1980 igi->igi_v2_timer = old_version_timer;
1981 }
1982 }
1983
1984 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1985 if (igi->igi_version != IGMP_VERSION_2) {
1986 igi->igi_version = IGMP_VERSION_2;
1987 igmp_v3_cancel_link_timers(igi);
1988 }
1989 } else if (igi->igi_v1_timer > 0) {
1990 if (igi->igi_version != IGMP_VERSION_1) {
1991 igi->igi_version = IGMP_VERSION_1;
1992 igmp_v3_cancel_link_timers(igi);
1993 }
1994 }
1995 }
1996
1997 /*
1998 * Cancel pending IGMPv3 timers for the given link and all groups
1999 * joined on it; state-change, general-query, and group-query timers.
2000 *
2001 * Only ever called on a transition from v3 to Compatibility mode. Kill
2002 * the timers stone dead (this may be expensive for large N groups); they
2003 * will be restarted if Compatibility Mode query processing later deems
2004 * them necessary.
2005 */
2006 static void
2007 igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
2008 {
2009 struct ifmultiaddr *ifma;
2010 struct ifnet *ifp;
2011 struct in_multi *inm, *tinm;
2012
2013 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2014 igi->igi_ifp, igi->igi_ifp->if_xname);
2015
2016 IN_MULTI_LOCK_ASSERT();
2017 IGMP_LOCK_ASSERT();
2018
2019 /*
2020 * Stop the v3 General Query Response on this link stone dead.
2021 * If fasttimo is woken up due to V_interface_timers_running,
2022 * the flag will be cleared if there are no pending link timers.
2023 */
2024 igi->igi_v3_timer = 0;
2025
2026 /*
2027 * Now clear the current-state and state-change report timers
2028 * for all memberships scoped to this link.
2029 */
2030 ifp = igi->igi_ifp;
2031 IF_ADDR_RLOCK(ifp);
2032 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2033 if (ifma->ifma_addr->sa_family != AF_INET ||
2034 ifma->ifma_protospec == NULL)
2035 continue;
2036 inm = (struct in_multi *)ifma->ifma_protospec;
2037 switch (inm->inm_state) {
2038 case IGMP_NOT_MEMBER:
2039 case IGMP_SILENT_MEMBER:
2040 case IGMP_IDLE_MEMBER:
2041 case IGMP_LAZY_MEMBER:
2042 case IGMP_SLEEPING_MEMBER:
2043 case IGMP_AWAKENING_MEMBER:
2044 /*
2045 * These states are either not relevant in v3 mode,
2046 * or are unreported. Do nothing.
2047 */
2048 break;
2049 case IGMP_LEAVING_MEMBER:
2050 /*
2051 * If we are leaving the group and switching to
2052 * compatibility mode, we need to release the final
2053 * reference held for issuing the INCLUDE {}, and
2054 * transition to REPORTING to ensure the host leave
2055 * message is sent upstream to the old querier --
2056 * transitioning straight to NOT_MEMBER would lose the leave and create a race.
2057 */
2058 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2059 /* FALLTHROUGH */
2060 case IGMP_G_QUERY_PENDING_MEMBER:
2061 case IGMP_SG_QUERY_PENDING_MEMBER:
2062 inm_clear_recorded(inm);
2063 /* FALLTHROUGH */
2064 case IGMP_REPORTING_MEMBER:
2065 inm->inm_state = IGMP_REPORTING_MEMBER;
2066 break;
2067 }
2068 /*
2069 * Always clear state-change and group report timers.
2070 * Free any pending IGMPv3 state-change records.
2071 */
2072 inm->inm_sctimer = 0;
2073 inm->inm_timer = 0;
2074 mbufq_drain(&inm->inm_scq);
2075 }
2076 IF_ADDR_RUNLOCK(ifp);
2077 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
2078 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
2079 inm_release_locked(inm);
2080 }
2081 }
2082
2083 /*
2084 * Update the Older Version Querier Present timers for a link.
2085 * See Section 7.2.1 of RFC 3376.
2086 */
2087 static void
2088 igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
2089 {
2090
2091 IGMP_LOCK_ASSERT();
2092
2093 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2094 /*
2095 * IGMPv1 and IGMPv2 Querier Present timers expired.
2096 *
2097 * Revert to IGMPv3.
2098 */
2099 if (igi->igi_version != IGMP_VERSION_3) {
2100 CTR5(KTR_IGMPV3,
2101 "%s: transition from v%d -> v%d on %p(%s)",
2102 __func__, igi->igi_version, IGMP_VERSION_3,
2103 igi->igi_ifp, igi->igi_ifp->if_xname);
2104 igi->igi_version = IGMP_VERSION_3;
2105 }
2106 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2107 /*
2108 * IGMPv1 Querier Present timer expired,
2109 * IGMPv2 Querier Present timer running.
2110 * If IGMPv2 was disabled since last timeout,
2111 * revert to IGMPv3.
2112 * If IGMPv2 is enabled, revert to IGMPv2.
2113 */
2114 if (!V_igmp_v2enable) {
2115 CTR5(KTR_IGMPV3,
2116 "%s: transition from v%d -> v%d on %p(%s)",
2117 __func__, igi->igi_version, IGMP_VERSION_3,
2118 igi->igi_ifp, igi->igi_ifp->if_xname);
2119 igi->igi_v2_timer = 0;
2120 igi->igi_version = IGMP_VERSION_3;
2121 } else {
2122 --igi->igi_v2_timer;
2123 if (igi->igi_version != IGMP_VERSION_2) {
2124 CTR5(KTR_IGMPV3,
2125 "%s: transition from v%d -> v%d on %p(%s)",
2126 __func__, igi->igi_version, IGMP_VERSION_2,
2127 igi->igi_ifp, igi->igi_ifp->if_xname);
2128 igi->igi_version = IGMP_VERSION_2;
2129 igmp_v3_cancel_link_timers(igi);
2130 }
2131 }
2132 } else if (igi->igi_v1_timer > 0) {
2133 /*
2134 * IGMPv1 Querier Present timer running.
2135 * Stop IGMPv2 timer if running.
2136 *
2137 * If IGMPv1 was disabled since last timeout,
2138 * revert to IGMPv3.
2139 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2140 */
2141 if (!V_igmp_v1enable) {
2142 CTR5(KTR_IGMPV3,
2143 "%s: transition from v%d -> v%d on %p(%s)",
2144 __func__, igi->igi_version, IGMP_VERSION_3,
2145 igi->igi_ifp, igi->igi_ifp->if_xname);
2146 igi->igi_v1_timer = 0;
2147 igi->igi_version = IGMP_VERSION_3;
2148 } else {
2149 --igi->igi_v1_timer;
2150 }
2151 if (igi->igi_v2_timer > 0) {
2152 CTR3(KTR_IGMPV3,
2153 "%s: cancel v2 timer on %p(%s)",
2154 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2155 igi->igi_v2_timer = 0;
2156 }
2157 }
2158 }
2159
2160 /*
2161 * Global slowtimo handler.
2162 * VIMAGE: Timeout handlers are expected to service all vimages.
2163 */
2164 void
2165 igmp_slowtimo(void)
2166 {
2167 VNET_ITERATOR_DECL(vnet_iter);
2168
2169 VNET_LIST_RLOCK_NOSLEEP();
2170 VNET_FOREACH(vnet_iter) {
2171 CURVNET_SET(vnet_iter);
2172 igmp_slowtimo_vnet();
2173 CURVNET_RESTORE();
2174 }
2175 VNET_LIST_RUNLOCK_NOSLEEP();
2176 }
2177
2178 /*
2179 * Per-vnet slowtimo handler.
2180 */
2181 static void
2182 igmp_slowtimo_vnet(void)
2183 {
2184 struct igmp_ifsoftc *igi;
2185
2186 IGMP_LOCK();
2187
2188 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2189 igmp_v1v2_process_querier_timers(igi);
2190 }
2191
2192 IGMP_UNLOCK();
2193 }
2194
2195 /*
2196 * Dispatch an IGMPv1/v2 host report or leave message.
2197 * These are always small enough to fit inside a single mbuf.
2198 */
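/*
 * (For reference: a v1/v2 IGMP message is a fixed 8 bytes, so with the
 * 20-byte IPv4 header built below the whole datagram is 28 bytes.)
 */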
2199 static int
2200 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2201 {
2202 struct ifnet *ifp;
2203 struct igmp *igmp;
2204 struct ip *ip;
2205 struct mbuf *m;
2206
2207 IN_MULTI_LOCK_ASSERT();
2208 IGMP_LOCK_ASSERT();
2209
2210 ifp = inm->inm_ifp;
2211
2212 m = m_gethdr(M_NOWAIT, MT_DATA);
2213 if (m == NULL)
2214 return (ENOMEM);
2215 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2216
2217 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2218
2219 m->m_data += sizeof(struct ip);
2220 m->m_len = sizeof(struct igmp);
2221
2222 igmp = mtod(m, struct igmp *);
2223 igmp->igmp_type = type;
2224 igmp->igmp_code = 0;
2225 igmp->igmp_group = inm->inm_addr;
2226 igmp->igmp_cksum = 0;
2227 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2228
2229 m->m_data -= sizeof(struct ip);
2230 m->m_len += sizeof(struct ip);
2231
2232 ip = mtod(m, struct ip *);
2233 ip->ip_tos = 0;
2234 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2235 ip->ip_off = 0;
2236 ip->ip_p = IPPROTO_IGMP;
2237 ip->ip_src.s_addr = INADDR_ANY;
2238
2239 if (type == IGMP_HOST_LEAVE_MESSAGE)
2240 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2241 else
2242 ip->ip_dst = inm->inm_addr;
2243
2244 igmp_save_context(m, ifp);
2245
2246 m->m_flags |= M_IGMPV2;
2247 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2248 m->m_flags |= M_IGMP_LOOP;
2249
2250 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2251 netisr_dispatch(NETISR_IGMP, m);
2252
2253 return (0);
2254 }
2255
2256 /*
2257 * Process a state change from the upper layer for the given IPv4 group.
2258 *
2259 * Each socket holds a reference on the in_multi in its own ip_moptions.
2260 * The socket layer will have made the necessary updates to the group
2261 * state, it is now up to IGMP to issue a state change report if there
2262 * has been any change between T0 (when the last state-change was issued)
2263 * and T1 (now).
2264 *
2265 * We use the IGMPv3 state machine at group level. The IGMP module
2266 * however makes the decision as to which IGMP protocol version to speak.
2267 * A state change *from* INCLUDE {} always means an initial join.
2268 * A state change *to* INCLUDE {} always means a final leave.
2269 *
2270 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2271 * save ourselves a bunch of work; any exclusive mode groups need not
2272 * compute source filter lists.
2273 *
2274 * VIMAGE: curvnet should have been set by caller, as this routine
2275 * is called from the socket option handlers.
2276 */
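/*
 * Illustrative caller sketch only -- the real callers live in in_mcast.c
 * and are not part of this file; the sequence below assumes the usual
 * netinet locking macros:
 *
 *	IN_MULTI_LOCK();
 *	... update the group's T1 filter state ...
 *	error = igmp_change_state(inm);
 *	IN_MULTI_UNLOCK();
 *
 * IN_MULTI_LOCK must be held on entry, as asserted below; the IGMP lock
 * is taken and dropped internally.
 */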
2277 int
2278 igmp_change_state(struct in_multi *inm)
2279 {
2280 struct igmp_ifsoftc *igi;
2281 struct ifnet *ifp;
2282 int error;
2283
2284 IN_MULTI_LOCK_ASSERT();
2285
2286 error = 0;
2287
2288 /*
2289 * Try to detect if the upper layer just asked us to change state
2290 * for an interface which has now gone away.
2291 */
2292 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2293 ifp = inm->inm_ifma->ifma_ifp;
2294 /*
2295 * Sanity check that netinet's notion of ifp is the
2296 * same as net's.
2297 */
2298 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2299
2300 IGMP_LOCK();
2301
2302 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2303 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2304
2305 /*
2306 * If we detect a state transition to or from MCAST_UNDEFINED
2307 * for this group, then we are starting or finishing an IGMP
2308 * life cycle for this group.
2309 */
2310 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2311 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2312 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2313 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2314 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2315 error = igmp_initial_join(inm, igi);
2316 goto out_locked;
2317 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2318 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2319 igmp_final_leave(inm, igi);
2320 goto out_locked;
2321 }
2322 } else {
2323 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2324 }
2325
2326 error = igmp_handle_state_change(inm, igi);
2327
2328 out_locked:
2329 IGMP_UNLOCK();
2330 return (error);
2331 }
2332
2333 /*
2334 * Perform the initial join for an IGMP group.
2335 *
2336 * When joining a group:
2337 * If the group should have its IGMP traffic suppressed, do nothing.
2338 * IGMPv1 starts sending IGMPv1 host membership reports.
2339 * IGMPv2 starts sending IGMPv2 host membership reports.
2340 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2341 * initial state of the membership.
2342 */
2343 static int
2344 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2345 {
2346 struct ifnet *ifp;
2347 struct mbufq *mq;
2348 int error, retval, syncstates;
2349
2350 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
2351 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2352 inm->inm_ifp->if_xname);
2353
2354 error = 0;
2355 syncstates = 1;
2356
2357 ifp = inm->inm_ifp;
2358
2359 IN_MULTI_LOCK_ASSERT();
2360 IGMP_LOCK_ASSERT();
2361
2362 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2363
2364 /*
2365 * Groups joined on loopback or marked as 'not reported',
2366 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2367 * are never reported in any IGMP protocol exchanges.
2368 * All other groups enter the appropriate IGMP state machine
2369 * for the version in use on this link.
2370 * A link marked as IGIF_SILENT causes IGMP to be completely
2371 * disabled for the link.
2372 */
2373 if ((ifp->if_flags & IFF_LOOPBACK) ||
2374 (igi->igi_flags & IGIF_SILENT) ||
2375 !igmp_isgroupreported(inm->inm_addr)) {
2376 CTR1(KTR_IGMPV3,
2377 "%s: not kicking state machine for silent group", __func__);
2378 inm->inm_state = IGMP_SILENT_MEMBER;
2379 inm->inm_timer = 0;
2380 } else {
2381 /*
2382 * Deal with overlapping in_multi lifecycle.
2383 * If this group was LEAVING, then make sure
2384 * we drop the reference we picked up to keep the
2385 * group around for the final INCLUDE {} enqueue.
2386 */
2387 if (igi->igi_version == IGMP_VERSION_3 &&
2388 inm->inm_state == IGMP_LEAVING_MEMBER)
2389 inm_release_locked(inm);
2390
2391 inm->inm_state = IGMP_REPORTING_MEMBER;
2392
2393 switch (igi->igi_version) {
2394 case IGMP_VERSION_1:
2395 case IGMP_VERSION_2:
2396 inm->inm_state = IGMP_IDLE_MEMBER;
2397 error = igmp_v1v2_queue_report(inm,
2398 (igi->igi_version == IGMP_VERSION_2) ?
2399 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2400 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2401 if (error == 0) {
2402 inm->inm_timer = IGMP_RANDOM_DELAY(
2403 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2404 V_current_state_timers_running = 1;
2405 }
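/*
 * The random delay above is intended to cover the v1/v2
 * unsolicited report retransmission: at most IGMP_V1V2_MAX_RI
 * seconds, expressed here in fasttimo ticks.
 */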
2406 break;
2407
2408 case IGMP_VERSION_3:
2409 /*
2410 * Defer update of T0 to T1, until the first copy
2411 * of the state change has been transmitted.
2412 */
2413 syncstates = 0;
2414
2415 /*
2416 * Immediately enqueue a State-Change Report for
2417 * this interface, freeing any previous reports.
2418 * Don't kick the timers if there is nothing to do,
2419 * or if an error occurred.
2420 */
2421 mq = &inm->inm_scq;
2422 mbufq_drain(mq);
2423 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2424 0, 0);
2425 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2426 __func__, retval);
2427 if (retval <= 0) {
2428 error = -retval;
2429 break;
2430 }
2431
2432 /*
2433 * Schedule transmission of pending state-change
2434 * report up to RV times for this link. The timer
2435 * will fire at the next igmp_fasttimo (~200ms),
2436 * giving us an opportunity to merge the reports.
2437 */
2438 if (igi->igi_flags & IGIF_LOOPBACK) {
2439 inm->inm_scrv = 1;
2440 } else {
2441 KASSERT(igi->igi_rv > 1,
2442 ("%s: invalid robustness %d", __func__,
2443 igi->igi_rv));
2444 inm->inm_scrv = igi->igi_rv;
2445 }
2446 inm->inm_sctimer = 1;
2447 V_state_change_timers_running = 1;
2448
2449 error = 0;
2450 break;
2451 }
2452 }
2453
2454 /*
2455 * Only update the T0 state if state change is atomic,
2456 * i.e. we don't need to wait for a timer to fire before we
2457 * can consider the state change to have been communicated.
2458 */
2459 if (syncstates) {
2460 inm_commit(inm);
2461 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2462 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2463 }
2464
2465 return (error);
2466 }
2467
2468 /*
2469 * Issue an intermediate state change during the IGMP life-cycle.
2470 */
2471 static int
2472 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2473 {
2474 struct ifnet *ifp;
2475 int retval;
2476
2477 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)",
2478 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2479 inm->inm_ifp->if_xname);
2480
2481 ifp = inm->inm_ifp;
2482
2483 IN_MULTI_LOCK_ASSERT();
2484 IGMP_LOCK_ASSERT();
2485
2486 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2487
2488 if ((ifp->if_flags & IFF_LOOPBACK) ||
2489 (igi->igi_flags & IGIF_SILENT) ||
2490 !igmp_isgroupreported(inm->inm_addr) ||
2491 (igi->igi_version != IGMP_VERSION_3)) {
2492 if (!igmp_isgroupreported(inm->inm_addr)) {
2493 CTR1(KTR_IGMPV3,
2494 "%s: not kicking state machine for silent group", __func__);
2495 }
2496 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2497 inm_commit(inm);
2498 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2499 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2500 return (0);
2501 }
2502
2503 mbufq_drain(&inm->inm_scq);
2504
2505 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2506 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2507 if (retval <= 0)
2508 return (-retval);
2509
2510 /*
2511 * If record(s) were enqueued, start the state-change
2512 * report timer for this group.
2513 */
2514 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2515 inm->inm_sctimer = 1;
2516 V_state_change_timers_running = 1;
2517
2518 return (0);
2519 }
2520
2521 /*
2522 * Perform the final leave for an IGMP group.
2523 *
2524 * When leaving a group:
2525 * IGMPv1 does nothing.
2526 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2527 * IGMPv3 enqueues a state-change report containing a transition
2528 * to INCLUDE {} for immediate transmission.
2529 */
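/*
 * In other words, the IGMPv3 leave is an ordinary state-change record:
 * a CHANGE_TO_INCLUDE_MODE (TO_IN {}) with an empty source list,
 * retransmitted inm_scrv (Robustness Variable) times before the group's
 * final reference is dropped.
 */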
2530 static void
2531 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2532 {
2533 int syncstates;
2534
2535 syncstates = 1;
2536
2537 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)",
2538 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2539 inm->inm_ifp->if_xname);
2540
2541 IN_MULTI_LOCK_ASSERT();
2542 IGMP_LOCK_ASSERT();
2543
2544 switch (inm->inm_state) {
2545 case IGMP_NOT_MEMBER:
2546 case IGMP_SILENT_MEMBER:
2547 case IGMP_LEAVING_MEMBER:
2548 /* Already leaving or left; do nothing. */
2549 CTR1(KTR_IGMPV3,
2550 "%s: not kicking state machine for silent group", __func__);
2551 break;
2552 case IGMP_REPORTING_MEMBER:
2553 case IGMP_IDLE_MEMBER:
2554 case IGMP_G_QUERY_PENDING_MEMBER:
2555 case IGMP_SG_QUERY_PENDING_MEMBER:
2556 if (igi->igi_version == IGMP_VERSION_2) {
2557 #ifdef INVARIANTS
2558 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2559 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2560 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2561 __func__);
2562 #endif
2563 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2564 inm->inm_state = IGMP_NOT_MEMBER;
2565 } else if (igi->igi_version == IGMP_VERSION_3) {
2566 /*
2567 * Stop group timer and all pending reports.
2568 * Immediately enqueue a state-change report
2569 * TO_IN {} to be sent on the next fast timeout,
2570 * giving us an opportunity to merge reports.
2571 */
2572 mbufq_drain(&inm->inm_scq);
2573 inm->inm_timer = 0;
2574 if (igi->igi_flags & IGIF_LOOPBACK) {
2575 inm->inm_scrv = 1;
2576 } else {
2577 inm->inm_scrv = igi->igi_rv;
2578 }
2579 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d "
2580 "pending retransmissions.", __func__,
2581 inet_ntoa(inm->inm_addr),
2582 inm->inm_ifp->if_xname, inm->inm_scrv);
2583 if (inm->inm_scrv == 0) {
2584 inm->inm_state = IGMP_NOT_MEMBER;
2585 inm->inm_sctimer = 0;
2586 } else {
2587 int retval;
2588
2589 inm_acquire_locked(inm);
2590
2591 retval = igmp_v3_enqueue_group_record(
2592 &inm->inm_scq, inm, 1, 0, 0);
2593 KASSERT(retval != 0,
2594 ("%s: enqueue record = %d", __func__,
2595 retval));
2596
2597 inm->inm_state = IGMP_LEAVING_MEMBER;
2598 inm->inm_sctimer = 1;
2599 V_state_change_timers_running = 1;
2600 syncstates = 0;
2601 }
2602 break;
2603 }
2604 break;
2605 case IGMP_LAZY_MEMBER:
2606 case IGMP_SLEEPING_MEMBER:
2607 case IGMP_AWAKENING_MEMBER:
2608 /* Our reports are suppressed; do nothing. */
2609 break;
2610 }
2611
2612 if (syncstates) {
2613 inm_commit(inm);
2614 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2615 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2616 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2617 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s",
2618 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2619 }
2620 }
2621
2622 /*
2623 * Enqueue an IGMPv3 group record to the given output queue.
2624 *
2625 * XXX This function could do with having the allocation code
2626 * split out, and the multiple-tree-walks coalesced into a single
2627 * routine as has been done in igmp_v3_enqueue_filter_change().
2628 *
2629 * If is_state_change is zero, a current-state record is appended.
2630 * If is_state_change is non-zero, a state-change report is appended.
2631 *
2632 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2633 * If is_group_query is zero, and if there is a packet with free space
2634 * at the tail of the queue, the record will be appended to it, provided
2635 * there is enough free space.
2636 * Otherwise a new mbuf packet chain is allocated.
2637 *
2638 * If is_source_query is non-zero, each source is checked to see if
2639 * it was recorded for a Group-Source query, and will be omitted if
2640 * it is not both in-mode and recorded.
2641 *
2642 * The function will attempt to allocate leading space in the packet
2643 * for the IP/IGMP header to be prepended without fragmenting the chain.
2644 *
2645 * If successful the size of all data appended to the queue is returned,
2646 * otherwise an error code less than zero is returned, or zero if
2647 * no record(s) were appended.
2648 */
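/*
 * Call patterns used elsewhere in this file, for reference
 * (is_state_change, is_group_query, is_source_query):
 *
 *	(1, 0, 0)	join/leave/filter change	-> inm->inm_scq
 *	(0, 1, 0 or 1)	pending group[-and-source] query -> qrq
 *	(0, 0, 0)	general query response		-> igi->igi_gq
 */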
2649 static int
2650 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2651 const int is_state_change, const int is_group_query,
2652 const int is_source_query)
2653 {
2654 struct igmp_grouprec ig;
2655 struct igmp_grouprec *pig;
2656 struct ifnet *ifp;
2657 struct ip_msource *ims, *nims;
2658 struct mbuf *m0, *m, *md;
2659 int error, is_filter_list_change;
2660 int minrec0len, m0srcs, msrcs, nbytes, off;
2661 int record_has_sources;
2662 int now;
2663 int type;
2664 in_addr_t naddr;
2665 uint8_t mode;
2666
2667 IN_MULTI_LOCK_ASSERT();
2668
2669 error = 0;
2670 ifp = inm->inm_ifp;
2671 is_filter_list_change = 0;
2672 m = NULL;
2673 m0 = NULL;
2674 m0srcs = 0;
2675 msrcs = 0;
2676 nbytes = 0;
2677 nims = NULL;
2678 record_has_sources = 1;
2679 pig = NULL;
2680 type = IGMP_DO_NOTHING;
2681 mode = inm->inm_st[1].iss_fmode;
2682
2683 /*
2684 * If we did not transition out of ASM mode during t0->t1,
2685 * and there are no source nodes to process, we can skip
2686 * the generation of source records.
2687 */
2688 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2689 inm->inm_nsrc == 0)
2690 record_has_sources = 0;
2691
2692 if (is_state_change) {
2693 /*
2694 * Queue a state change record.
2695 * If the mode did not change, and there are non-ASM
2696 * listeners or source filters present,
2697 * we potentially need to issue two records for the group.
2698 * If we are transitioning to MCAST_UNDEFINED, we need
2699 * not send any sources.
2700 * If there are ASM listeners, and there was no filter
2701 * mode transition of any kind, do nothing.
2702 */
2703 if (mode != inm->inm_st[0].iss_fmode) {
2704 if (mode == MCAST_EXCLUDE) {
2705 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2706 __func__);
2707 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2708 } else {
2709 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2710 __func__);
2711 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2712 if (mode == MCAST_UNDEFINED)
2713 record_has_sources = 0;
2714 }
2715 } else {
2716 if (record_has_sources) {
2717 is_filter_list_change = 1;
2718 } else {
2719 type = IGMP_DO_NOTHING;
2720 }
2721 }
2722 } else {
2723 /*
2724 * Queue a current state record.
2725 */
2726 if (mode == MCAST_EXCLUDE) {
2727 type = IGMP_MODE_IS_EXCLUDE;
2728 } else if (mode == MCAST_INCLUDE) {
2729 type = IGMP_MODE_IS_INCLUDE;
2730 KASSERT(inm->inm_st[1].iss_asm == 0,
2731 ("%s: inm %p is INCLUDE but ASM count is %d",
2732 __func__, inm, inm->inm_st[1].iss_asm));
2733 }
2734 }
2735
2736 /*
2737 * Generate the filter list changes using a separate function.
2738 */
2739 if (is_filter_list_change)
2740 return (igmp_v3_enqueue_filter_change(mq, inm));
2741
2742 if (type == IGMP_DO_NOTHING) {
2743 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
2744 __func__, inet_ntoa(inm->inm_addr),
2745 inm->inm_ifp->if_xname);
2746 return (0);
2747 }
2748
2749 /*
2750 * If any sources are present, we must be able to fit at least
2751 * one in the trailing space of the tail packet's mbuf,
2752 * ideally more.
2753 */
2754 minrec0len = sizeof(struct igmp_grouprec);
2755 if (record_has_sources)
2756 minrec0len += sizeof(in_addr_t);
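/*
 * (sizeof(struct igmp_grouprec) is 8 bytes and each source address
 * adds 4, so minrec0len is either 8 or 12 bytes here.)
 */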
2757
2758 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__,
2759 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
2760 inm->inm_ifp->if_xname);
2761
2762 /*
2763 * Check if we have a packet in the tail of the queue for this
2764 * group into which the first group record for this group will fit.
2765 * Otherwise allocate a new packet.
2766 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2767 * Note: Group records for G/GSR query responses MUST be sent
2768 * in their own packet.
2769 */
2770 m0 = mbufq_last(mq);
2771 if (!is_group_query &&
2772 m0 != NULL &&
2773 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2774 (m0->m_pkthdr.len + minrec0len) <
2775 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2776 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2777 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2778 m = m0;
2779 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2780 } else {
2781 if (mbufq_full(mq)) {
2782 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2783 return (-ENOMEM);
2784 }
2785 m = NULL;
2786 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2787 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
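/*
 * Rough example, assuming IGMP_LEADINGSPACE covers the 32 bytes of
 * IP + Router Alert + v3 report header: on a 1500-byte MTU link this
 * leaves room for roughly (1500 - 32 - 8) / 4 = 365 sources in the
 * first group record of a fresh packet.
 */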
2788 if (!is_state_change && !is_group_query) {
2789 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2790 if (m)
2791 m->m_data += IGMP_LEADINGSPACE;
2792 }
2793 if (m == NULL) {
2794 m = m_gethdr(M_NOWAIT, MT_DATA);
2795 if (m)
2796 M_ALIGN(m, IGMP_LEADINGSPACE);
2797 }
2798 if (m == NULL)
2799 return (-ENOMEM);
2800
2801 igmp_save_context(m, ifp);
2802
2803 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2804 }
2805
2806 /*
2807 * Append group record.
2808 * If we have sources, we don't know how many yet.
2809 */
2810 ig.ig_type = type;
2811 ig.ig_datalen = 0;
2812 ig.ig_numsrc = 0;
2813 ig.ig_group = inm->inm_addr;
2814 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2815 if (m != m0)
2816 m_freem(m);
2817 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2818 return (-ENOMEM);
2819 }
2820 nbytes += sizeof(struct igmp_grouprec);
2821
2822 /*
2823 * Append as many sources as will fit in the first packet.
2824 * If we are appending to a new packet, the chain allocation
2825 * may potentially use clusters; use m_getptr() in this case.
2826 * If we are appending to an existing packet, we need to obtain
2827 * a pointer to the group record after m_append(), in case a new
2828 * mbuf was allocated.
2829 * Only append sources which are in-mode at t1. If we are
2830 * transitioning to MCAST_UNDEFINED state on the group, do not
2831 * include source entries.
2832 * Only report recorded sources in our filter set when responding
2833 * to a group-source query.
2834 */
2835 if (record_has_sources) {
2836 if (m == m0) {
2837 md = m_last(m);
2838 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2839 md->m_len - nbytes);
2840 } else {
2841 md = m_getptr(m, 0, &off);
2842 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2843 off);
2844 }
2845 msrcs = 0;
2846 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2847 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2848 inet_ntoa_haddr(ims->ims_haddr));
2849 now = ims_get_mode(inm, ims, 1);
2850 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2851 if ((now != mode) ||
2852 (now == mode && mode == MCAST_UNDEFINED)) {
2853 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2854 continue;
2855 }
2856 if (is_source_query && ims->ims_stp == 0) {
2857 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2858 __func__);
2859 continue;
2860 }
2861 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2862 naddr = htonl(ims->ims_haddr);
2863 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2864 if (m != m0)
2865 m_freem(m);
2866 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2867 __func__);
2868 return (-ENOMEM);
2869 }
2870 nbytes += sizeof(in_addr_t);
2871 ++msrcs;
2872 if (msrcs == m0srcs)
2873 break;
2874 }
2875 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2876 msrcs);
2877 pig->ig_numsrc = htons(msrcs);
2878 nbytes += (msrcs * sizeof(in_addr_t));
2879 }
2880
2881 if (is_source_query && msrcs == 0) {
2882 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2883 if (m != m0)
2884 m_freem(m);
2885 return (0);
2886 }
2887
2888 /*
2889 * We are good to go with first packet.
2890 */
2891 if (m != m0) {
2892 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2893 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2894 mbufq_enqueue(mq, m);
2895 } else
2896 m->m_pkthdr.PH_vt.vt_nrecs++;
2897
2898 /*
2899 * No further work needed if no source list in packet(s).
2900 */
2901 if (!record_has_sources)
2902 return (nbytes);
2903
2904 /*
2905 * Whilst sources remain to be announced, we need to allocate
2906 * a new packet and fill out as many sources as will fit.
2907 * Always try for a cluster first.
2908 */
2909 while (nims != NULL) {
2910 if (mbufq_full(mq)) {
2911 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2912 return (-ENOMEM);
2913 }
2914 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2915 if (m)
2916 m->m_data += IGMP_LEADINGSPACE;
2917 if (m == NULL) {
2918 m = m_gethdr(M_NOWAIT, MT_DATA);
2919 if (m)
2920 M_ALIGN(m, IGMP_LEADINGSPACE);
2921 }
2922 if (m == NULL)
2923 return (-ENOMEM);
2924 igmp_save_context(m, ifp);
2925 md = m_getptr(m, 0, &off);
2926 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2927 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2928
2929 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2930 if (m != m0)
2931 m_freem(m);
2932 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2933 return (-ENOMEM);
2934 }
2935 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2936 nbytes += sizeof(struct igmp_grouprec);
2937
2938 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2939 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2940
2941 msrcs = 0;
2942 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2943 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2944 inet_ntoa_haddr(ims->ims_haddr));
2945 now = ims_get_mode(inm, ims, 1);
2946 if ((now != mode) ||
2947 (now == mode && mode == MCAST_UNDEFINED)) {
2948 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2949 continue;
2950 }
2951 if (is_source_query && ims->ims_stp == 0) {
2952 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2953 __func__);
2954 continue;
2955 }
2956 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2957 naddr = htonl(ims->ims_haddr);
2958 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2959 if (m != m0)
2960 m_freem(m);
2961 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2962 __func__);
2963 return (-ENOMEM);
2964 }
2965 ++msrcs;
2966 if (msrcs == m0srcs)
2967 break;
2968 }
2969 pig->ig_numsrc = htons(msrcs);
2970 nbytes += (msrcs * sizeof(in_addr_t));
2971
2972 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2973 mbufq_enqueue(mq, m);
2974 }
2975
2976 return (nbytes);
2977 }
2978
2979 /*
2980 * Type used to mark record pass completion.
2981 * We exploit the fact we can cast to this easily from the
2982 * current filter modes on each ip_msource node.
2983 */
2984 typedef enum {
2985 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2986 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2987 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2988 REC_FULL = REC_ALLOW | REC_BLOCK
2989 } rectype_t;
2990
2991 /*
2992 * Enqueue an IGMPv3 filter list change to the given output queue.
2993 *
2994 * Source list filter state is held in an RB-tree. When the filter list
2995 * for a group is changed without changing its mode, we need to compute
2996 * the deltas between T0 and T1 for each source in the filter set,
2997 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2998 *
2999 * As we may potentially queue two record types, and the entire R-B tree
3000 * needs to be walked at once, we break this out into its own function
3001 * so we can generate a tightly packed queue of packets.
3002 *
3003 * XXX This could be written to only use one tree walk, although that makes
3004 * serializing into the mbuf chains a bit harder. For now we do two walks
3005 * which makes things easier on us, and it may or may not be harder on
3006 * the L2 cache.
3007 *
3008 * If successful the size of all data appended to the queue is returned,
3009 * otherwise an error code less than zero is returned, or zero if
3010 * no record(s) were appended.
3011 */
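/*
 * For example (cf. RFC 3376, Section 5.1): with an unchanged INCLUDE
 * filter mode, sources added between T0 and T1 are reported in an
 * ALLOW_NEW_SOURCES record and sources removed in a BLOCK_OLD_SOURCES
 * record; with an unchanged EXCLUDE mode the roles are reversed.
 */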
3012 static int
3013 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
3014 {
3015 static const int MINRECLEN =
3016 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3017 struct ifnet *ifp;
3018 struct igmp_grouprec ig;
3019 struct igmp_grouprec *pig;
3020 struct ip_msource *ims, *nims;
3021 struct mbuf *m, *m0, *md;
3022 in_addr_t naddr;
3023 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3024 int nallow, nblock;
3025 uint8_t mode, now, then;
3026 rectype_t crt, drt, nrt;
3027
3028 IN_MULTI_LOCK_ASSERT();
3029
3030 if (inm->inm_nsrc == 0 ||
3031 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3032 return (0);
3033
3034 ifp = inm->inm_ifp; /* interface */
3035 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3036 crt = REC_NONE; /* current group record type */
3037 drt = REC_NONE; /* mask of completed group record types */
3038 nrt = REC_NONE; /* record type for current node */
3039 m0srcs = 0; /* # source which will fit in current mbuf chain */
3040 nbytes = 0; /* # of bytes appended to group's state-change queue */
3041 npbytes = 0; /* # of bytes appended this packet */
3042 rsrcs = 0; /* # sources encoded in current record */
3043 schanged = 0; /* # nodes encoded in overall filter change */
3044 nallow = 0; /* # of source entries in ALLOW_NEW */
3045 nblock = 0; /* # of source entries in BLOCK_OLD */
3046 nims = NULL; /* next tree node pointer */
3047
3048 /*
3049 * For each possible filter record mode.
3050 * The first kind of source we encounter tells us which
3051 * is the first kind of record we start appending.
3052 * If a node transitioned to UNDEFINED at t1, its mode is treated
3053 * as the inverse of the group's filter mode.
3054 */
3055 while (drt != REC_FULL) {
3056 do {
3057 m0 = mbufq_last(mq);
3058 if (m0 != NULL &&
3059 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3060 IGMP_V3_REPORT_MAXRECS) &&
3061 (m0->m_pkthdr.len + MINRECLEN) <
3062 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3063 m = m0;
3064 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3065 sizeof(struct igmp_grouprec)) /
3066 sizeof(in_addr_t);
3067 CTR1(KTR_IGMPV3,
3068 "%s: use previous packet", __func__);
3069 } else {
3070 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3071 if (m)
3072 m->m_data += IGMP_LEADINGSPACE;
3073 if (m == NULL) {
3074 m = m_gethdr(M_NOWAIT, MT_DATA);
3075 if (m)
3076 M_ALIGN(m, IGMP_LEADINGSPACE);
3077 }
3078 if (m == NULL) {
3079 CTR1(KTR_IGMPV3,
3080 "%s: m_get*() failed", __func__);
3081 return (-ENOMEM);
3082 }
3083 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3084 igmp_save_context(m, ifp);
3085 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3086 sizeof(struct igmp_grouprec)) /
3087 sizeof(in_addr_t);
3088 npbytes = 0;
3089 CTR1(KTR_IGMPV3,
3090 "%s: allocated new packet", __func__);
3091 }
3092 /*
3093 * Append the IGMP group record header to the
3094 * current packet's data area.
3095 * Recalculate pointer to free space for next
3096 * group record, in case m_append() allocated
3097 * a new mbuf or cluster.
3098 */
3099 memset(&ig, 0, sizeof(ig));
3100 ig.ig_group = inm->inm_addr;
3101 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3102 if (m != m0)
3103 m_freem(m);
3104 CTR1(KTR_IGMPV3,
3105 "%s: m_append() failed", __func__);
3106 return (-ENOMEM);
3107 }
3108 npbytes += sizeof(struct igmp_grouprec);
3109 if (m != m0) {
3110 /* new packet; offset in chain */
3111 md = m_getptr(m, npbytes -
3112 sizeof(struct igmp_grouprec), &off);
3113 pig = (struct igmp_grouprec *)(mtod(md,
3114 uint8_t *) + off);
3115 } else {
3116 /* current packet; offset from last append */
3117 md = m_last(m);
3118 pig = (struct igmp_grouprec *)(mtod(md,
3119 uint8_t *) + md->m_len -
3120 sizeof(struct igmp_grouprec));
3121 }
3122 /*
3123 * Begin walking the tree for this record type
3124 * pass, or continue from where we left off
3125 * previously if we had to allocate a new packet.
3126 * Only report deltas in-mode at t1.
3127 * We need not report included sources as allowed
3128 * if we are in inclusive mode on the group,
3129 * however the converse is not true.
3130 */
3131 rsrcs = 0;
3132 if (nims == NULL)
3133 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3134 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3135 CTR2(KTR_IGMPV3, "%s: visit node %s",
3136 __func__, inet_ntoa_haddr(ims->ims_haddr));
3137 now = ims_get_mode(inm, ims, 1);
3138 then = ims_get_mode(inm, ims, 0);
3139 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3140 __func__, then, now);
3141 if (now == then) {
3142 CTR1(KTR_IGMPV3,
3143 "%s: skip unchanged", __func__);
3144 continue;
3145 }
3146 if (mode == MCAST_EXCLUDE &&
3147 now == MCAST_INCLUDE) {
3148 CTR1(KTR_IGMPV3,
3149 "%s: skip IN src on EX group",
3150 __func__);
3151 continue;
3152 }
3153 nrt = (rectype_t)now;
3154 if (nrt == REC_NONE)
3155 nrt = (rectype_t)(~mode & REC_FULL);
3156 if (schanged++ == 0) {
3157 crt = nrt;
3158 } else if (crt != nrt)
3159 continue;
3160 naddr = htonl(ims->ims_haddr);
3161 if (!m_append(m, sizeof(in_addr_t),
3162 (void *)&naddr)) {
3163 if (m != m0)
3164 m_freem(m);
3165 CTR1(KTR_IGMPV3,
3166 "%s: m_append() failed", __func__);
3167 return (-ENOMEM);
3168 }
3169 nallow += !!(crt == REC_ALLOW);
3170 nblock += !!(crt == REC_BLOCK);
3171 if (++rsrcs == m0srcs)
3172 break;
3173 }
3174 /*
3175 * If we did not append any tree nodes on this
3176 * pass, back out of allocations.
3177 */
3178 if (rsrcs == 0) {
3179 npbytes -= sizeof(struct igmp_grouprec);
3180 if (m != m0) {
3181 CTR1(KTR_IGMPV3,
3182 "%s: m_free(m)", __func__);
3183 m_freem(m);
3184 } else {
3185 CTR1(KTR_IGMPV3,
3186 "%s: m_adj(m, -ig)", __func__);
3187 m_adj(m, -((int)sizeof(
3188 struct igmp_grouprec)));
3189 }
3190 continue;
3191 }
3192 npbytes += (rsrcs * sizeof(in_addr_t));
3193 if (crt == REC_ALLOW)
3194 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3195 else if (crt == REC_BLOCK)
3196 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3197 pig->ig_numsrc = htons(rsrcs);
3198 /*
3199 * Count the new group record, and enqueue this
3200 * packet if it wasn't already queued.
3201 */
3202 m->m_pkthdr.PH_vt.vt_nrecs++;
3203 if (m != m0)
3204 mbufq_enqueue(mq, m);
3205 nbytes += npbytes;
3206 } while (nims != NULL);
3207 drt |= crt;
3208 crt = (~crt & REC_FULL);
3209 }
3210
3211 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3212 nallow, nblock);
3213
3214 return (nbytes);
3215 }
3216
3217 static int
3218 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3219 {
3220 struct mbufq *gq;
3221 struct mbuf *m; /* pending state-change */
3222 struct mbuf *m0; /* copy of pending state-change */
3223 struct mbuf *mt; /* last state-change in packet */
3224 int docopy, domerge;
3225 u_int recslen;
3226
3227 docopy = 0;
3228 domerge = 0;
3229 recslen = 0;
3230
3231 IN_MULTI_LOCK_ASSERT();
3232 IGMP_LOCK_ASSERT();
3233
3234 /*
3235 * If there are further pending retransmissions, make a writable
3236 * copy of each queued state-change message before merging.
3237 */
3238 if (inm->inm_scrv > 0)
3239 docopy = 1;
3240
3241 gq = &inm->inm_scq;
3242 #ifdef KTR
3243 if (mbufq_first(gq) == NULL) {
3244 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3245 __func__, inm);
3246 }
3247 #endif
3248
3249 m = mbufq_first(gq);
3250 while (m != NULL) {
3251 /*
3252 * Only merge the report into the current packet if
3253 * there is sufficient space to do so; an IGMPv3 report
3254 * packet may only contain 65,535 group records.
3255 * Always use a simple mbuf chain concatenation to do this,
3256 * as large state changes for single groups may have
3257 * allocated clusters.
3258 */
3259 domerge = 0;
3260 mt = mbufq_last(scq);
3261 if (mt != NULL) {
3262 recslen = m_length(m, NULL);
3263
3264 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3265 m->m_pkthdr.PH_vt.vt_nrecs <=
3266 IGMP_V3_REPORT_MAXRECS) &&
3267 (mt->m_pkthdr.len + recslen <=
3268 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3269 domerge = 1;
3270 }
3271
3272 if (!domerge && mbufq_full(gq)) {
3273 CTR2(KTR_IGMPV3,
3274 "%s: outbound queue full, skipping whole packet %p",
3275 __func__, m);
3276 mt = m->m_nextpkt;
3277 if (!docopy)
3278 m_freem(m);
3279 m = mt;
3280 continue;
3281 }
3282
3283 if (!docopy) {
3284 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3285 m0 = mbufq_dequeue(gq);
3286 m = m0->m_nextpkt;
3287 } else {
3288 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3289 m0 = m_dup(m, M_NOWAIT);
3290 if (m0 == NULL)
3291 return (ENOMEM);
3292 m0->m_nextpkt = NULL;
3293 m = m->m_nextpkt;
3294 }
3295
3296 if (!domerge) {
3297 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p",
3298 __func__, m0, scq);
3299 mbufq_enqueue(scq, m0);
3300 } else {
3301 struct mbuf *mtl; /* last mbuf of packet mt */
3302
3303 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p",
3304 __func__, m0, mt);
3305
3306 mtl = m_last(mt);
3307 m0->m_flags &= ~M_PKTHDR;
3308 mt->m_pkthdr.len += recslen;
3309 mt->m_pkthdr.PH_vt.vt_nrecs +=
3310 m0->m_pkthdr.PH_vt.vt_nrecs;
3311
3312 mtl->m_next = m0;
3313 }
3314 }
3315
3316 return (0);
3317 }
3318
3319 /*
3320 * Respond to a pending IGMPv3 General Query.
3321 */
3322 static void
3323 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3324 {
3325 struct ifmultiaddr *ifma;
3326 struct ifnet *ifp;
3327 struct in_multi *inm;
3328 int retval, loop;
3329
3330 IN_MULTI_LOCK_ASSERT();
3331 IGMP_LOCK_ASSERT();
3332
3333 KASSERT(igi->igi_version == IGMP_VERSION_3,
3334 ("%s: called when version %d", __func__, igi->igi_version));
3335
3336 /*
3337 * Check that there are some packets queued. If so, send them first.
3338 * For a large number of groups, the reply to a general query can take
3339 * many packets; we should finish sending them before starting to
3340 * queue the new reply.
3341 */
3342 if (mbufq_len(&igi->igi_gq) != 0)
3343 goto send;
3344
3345 ifp = igi->igi_ifp;
3346
3347 IF_ADDR_RLOCK(ifp);
3348 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3349 if (ifma->ifma_addr->sa_family != AF_INET ||
3350 ifma->ifma_protospec == NULL)
3351 continue;
3352
3353 inm = (struct in_multi *)ifma->ifma_protospec;
3354 KASSERT(ifp == inm->inm_ifp,
3355 ("%s: inconsistent ifp", __func__));
3356
3357 switch (inm->inm_state) {
3358 case IGMP_NOT_MEMBER:
3359 case IGMP_SILENT_MEMBER:
3360 break;
3361 case IGMP_REPORTING_MEMBER:
3362 case IGMP_IDLE_MEMBER:
3363 case IGMP_LAZY_MEMBER:
3364 case IGMP_SLEEPING_MEMBER:
3365 case IGMP_AWAKENING_MEMBER:
3366 inm->inm_state = IGMP_REPORTING_MEMBER;
3367 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3368 inm, 0, 0, 0);
3369 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3370 __func__, retval);
3371 break;
3372 case IGMP_G_QUERY_PENDING_MEMBER:
3373 case IGMP_SG_QUERY_PENDING_MEMBER:
3374 case IGMP_LEAVING_MEMBER:
3375 break;
3376 }
3377 }
3378 IF_ADDR_RUNLOCK(ifp);
3379
3380 send:
3381 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3382 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3383
3384 /*
3385 * Slew transmission of bursts over 500ms intervals.
3386 */
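/*
 * (Each pass above sends at most IGMP_MAX_RESPONSE_BURST packets;
 * if anything remains queued, igi_v3_timer is re-armed below with a
 * small random delay so the remainder goes out on a later pass rather
 * than all at once.)
 */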
3387 if (mbufq_first(&igi->igi_gq) != NULL) {
3388 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3389 IGMP_RESPONSE_BURST_INTERVAL);
3390 V_interface_timers_running = 1;
3391 }
3392 }
3393
3394 /*
3395 * Transmit the next pending IGMP message in the output queue.
3396 *
3397 * We get called from netisr_processqueue(). A mutex private to igmpoq
3398 * will be acquired and released around this routine.
3399 *
3400 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3401 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3402 * a link and uses a link-scope multicast address.
3403 */
3404 static void
3405 igmp_intr(struct mbuf *m)
3406 {
3407 struct ip_moptions imo;
3408 struct ifnet *ifp;
3409 struct mbuf *ipopts, *m0;
3410 int error;
3411 uint32_t ifindex;
3412
3413 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3414
3415 /*
3416 * Set VNET image pointer from enqueued mbuf chain
3417 * before doing anything else. Whilst we use interface
3418 * indexes to guard against interface detach, they are
3419 * unique to each VIMAGE and must be retrieved.
3420 */
3421 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3422 ifindex = igmp_restore_context(m);
3423
3424 /*
3425 * Check if the ifnet still exists. This limits the scope of
3426 * any race in the absence of a global ifp lock for low cost
3427 * (an array lookup).
3428 */
3429 ifp = ifnet_byindex(ifindex);
3430 if (ifp == NULL) {
3431 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3432 __func__, m, ifindex);
3433 m_freem(m);
3434 IPSTAT_INC(ips_noroute);
3435 goto out;
3436 }
3437
3438 ipopts = V_igmp_sendra ? m_raopt : NULL;
3439
3440 imo.imo_multicast_ttl = 1;
3441 imo.imo_multicast_vif = -1;
3442 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3443
3444 /*
3445 * If the user requested that IGMP traffic be explicitly
3446 * redirected to the loopback interface (e.g. they are running a
3447 * MANET interface and the routing protocol needs to see the
3448 * updates), handle this now.
3449 */
3450 if (m->m_flags & M_IGMP_LOOP)
3451 imo.imo_multicast_ifp = V_loif;
3452 else
3453 imo.imo_multicast_ifp = ifp;
3454
3455 if (m->m_flags & M_IGMPV2) {
3456 m0 = m;
3457 } else {
3458 m0 = igmp_v3_encap_report(ifp, m);
3459 if (m0 == NULL) {
3460 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3461 m_freem(m);
3462 IPSTAT_INC(ips_odropped);
3463 goto out;
3464 }
3465 }
3466
3467 igmp_scrub_context(m0);
3468 m_clrprotoflags(m);
3469 m0->m_pkthdr.rcvif = V_loif;
3470 #ifdef MAC
3471 mac_netinet_igmp_send(ifp, m0);
3472 #endif
3473 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3474 if (error) {
3475 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3476 goto out;
3477 }
3478
3479 IGMPSTAT_INC(igps_snd_reports);
3480
3481 out:
3482 /*
3483 * We must restore the existing vnet pointer before
3484 * continuing as we are run from netisr context.
3485 */
3486 CURVNET_RESTORE();
3487 }
3488
3489 /*
3490 * Encapsulate an IGMPv3 report.
3491 *
3492 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3493 * chain has already had its IP/IGMPv3 header prepended. In this case
3494 * the function will not attempt to prepend; the lengths and checksums
3495 * will however be re-computed.
3496 *
3497 * Returns a pointer to the new mbuf chain head, or NULL if the
3498 * allocation failed.
3499 */
3500 static struct mbuf *
3501 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3502 {
3503 struct rm_priotracker in_ifa_tracker;
3504 struct igmp_report *igmp;
3505 struct ip *ip;
3506 int hdrlen, igmpreclen;
3507
3508 KASSERT((m->m_flags & M_PKTHDR),
3509 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3510
3511 igmpreclen = m_length(m, NULL);
3512 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
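/*
 * hdrlen is 28 bytes here: a 20-byte IPv4 header plus an 8-byte
 * IGMPv3 report header. The Router Alert option is not prepended
 * by this function; it is handed to ip_output() separately via
 * m_raopt in igmp_intr().
 */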
3513
3514 if (m->m_flags & M_IGMPV3_HDR) {
3515 igmpreclen -= hdrlen;
3516 } else {
3517 M_PREPEND(m, hdrlen, M_NOWAIT);
3518 if (m == NULL)
3519 return (NULL);
3520 m->m_flags |= M_IGMPV3_HDR;
3521 }
3522
3523 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3524
3525 m->m_data += sizeof(struct ip);
3526 m->m_len -= sizeof(struct ip);
3527
3528 igmp = mtod(m, struct igmp_report *);
3529 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3530 igmp->ir_rsv1 = 0;
3531 igmp->ir_rsv2 = 0;
3532 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3533 igmp->ir_cksum = 0;
3534 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3535 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3536
3537 m->m_data -= sizeof(struct ip);
3538 m->m_len += sizeof(struct ip);
3539
3540 ip = mtod(m, struct ip *);
3541 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3542 ip->ip_len = htons(hdrlen + igmpreclen);
3543 ip->ip_off = htons(IP_DF);
3544 ip->ip_p = IPPROTO_IGMP;
3545 ip->ip_sum = 0;
3546
3547 ip->ip_src.s_addr = INADDR_ANY;
3548
3549 if (m->m_flags & M_IGMP_LOOP) {
3550 struct in_ifaddr *ia;
3551
3552 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
3553 if (ia != NULL) {
3554 ip->ip_src = ia->ia_addr.sin_addr;
3555 ifa_free(&ia->ia_ifa);
3556 }
3557 }
3558
3559 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3560
3561 return (m);
3562 }
3563
3564 #ifdef KTR
3565 static char *
3566 igmp_rec_type_to_str(const int type)
3567 {
3568
3569 switch (type) {
3570 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3571 return "TO_EX";
3572 break;
3573 case IGMP_CHANGE_TO_INCLUDE_MODE:
3574 return "TO_IN";
3575 break;
3576 case IGMP_MODE_IS_EXCLUDE:
3577 return "MODE_EX";
3578 break;
3579 case IGMP_MODE_IS_INCLUDE:
3580 return "MODE_IN";
3581 break;
3582 case IGMP_ALLOW_NEW_SOURCES:
3583 return "ALLOW_NEW";
3584 break;
3585 case IGMP_BLOCK_OLD_SOURCES:
3586 return "BLOCK_OLD";
3587 break;
3588 default:
3589 break;
3590 }
3591 return "unknown";
3592 }
3593 #endif
3594
3595 #ifdef VIMAGE
3596 static void
3597 vnet_igmp_init(const void *unused __unused)
3598 {
3599
3600 netisr_register_vnet(&igmp_nh);
3601 }
3602 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3603 vnet_igmp_init, NULL);
3604
3605 static void
3606 vnet_igmp_uninit(const void *unused __unused)
3607 {
3608
3609 /* This can happen when we shutdown the entire network stack. */
3610 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3611
3612 netisr_unregister_vnet(&igmp_nh);
3613 }
3614 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3615 vnet_igmp_uninit, NULL);
3616 #endif
3617
3618 #ifdef DDB
3619 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3620 {
3621 struct igmp_ifsoftc *igi, *tigi;
3622 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3623
3624 if (!have_addr) {
3625 db_printf("usage: show igi_list <addr>\n");
3626 return;
3627 }
3628 igi_head = (struct _igi_list *)addr;
3629
3630 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3631 db_printf("igmp_ifsoftc %p:\n", igi);
3632 db_printf(" ifp %p\n", igi->igi_ifp);
3633 db_printf(" version %u\n", igi->igi_version);
3634 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3635 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3636 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3637 db_printf(" flags %#x\n", igi->igi_flags);
3638 db_printf(" rv %u\n", igi->igi_rv);
3639 db_printf(" qi %u\n", igi->igi_qi);
3640 db_printf(" qri %u\n", igi->igi_qri);
3641 db_printf(" uri %u\n", igi->igi_uri);
3642 /* SLIST_HEAD(,in_multi) igi_relinmhead */
3643 /* struct mbufq igi_gq; */
3644 db_printf("\n");
3645 }
3646 }
3647 #endif
3648
3649 static int
3650 igmp_modevent(module_t mod, int type, void *unused __unused)
3651 {
3652
3653 switch (type) {
3654 case MOD_LOAD:
3655 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3656 IGMP_LOCK_INIT();
3657 m_raopt = igmp_ra_alloc();
3658 netisr_register(&igmp_nh);
3659 break;
3660 case MOD_UNLOAD:
3661 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3662 netisr_unregister(&igmp_nh);
3663 m_free(m_raopt);
3664 m_raopt = NULL;
3665 IGMP_LOCK_DESTROY();
3666 break;
3667 default:
3668 return (EOPNOTSUPP);
3669 }
3670 return (0);
3671 }
3672
3673 static moduledata_t igmp_mod = {
3674 "igmp",
3675 igmp_modevent,
3676 0
3677 };
3678 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);