sys/netinet/igmp.c
1 /*-
2 * Copyright (c) 2007-2009 Bruce Simpson.
3 * Copyright (c) 1988 Stephen Deering.
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Stephen Deering of Stanford University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
35 */
36
37 /*
38 * Internet Group Management Protocol (IGMP) routines.
39 * [RFC1112, RFC2236, RFC3376]
40 *
41 * Written by Steve Deering, Stanford, May 1988.
42 * Modified by Rosen Sharma, Stanford, Aug 1994.
43 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
46 *
47 * MULTICAST Revision: 3.5.1.4
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/11.1/sys/netinet/igmp.c 315456 2017-03-17 14:54:10Z vangyzen $");
52
53 #include "opt_ddb.h"
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/module.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/socket.h>
61 #include <sys/protosw.h>
62 #include <sys/kernel.h>
63 #include <sys/lock.h>
64 #include <sys/rmlock.h>
65 #include <sys/sysctl.h>
66 #include <sys/ktr.h>
67 #include <sys/condvar.h>
68
69 #ifdef DDB
70 #include <ddb/ddb.h>
71 #endif
72
73 #include <net/if.h>
74 #include <net/if_var.h>
75 #include <net/netisr.h>
76 #include <net/vnet.h>
77
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/ip.h>
82 #include <netinet/ip_var.h>
83 #include <netinet/ip_options.h>
84 #include <netinet/igmp.h>
85 #include <netinet/igmp_var.h>
86
87 #include <machine/in_cksum.h>
88
89 #include <security/mac/mac_framework.h>
90
91 #ifndef KTR_IGMPV3
92 #define KTR_IGMPV3 KTR_INET
93 #endif
94
95 static struct igmp_ifsoftc *
96 igi_alloc_locked(struct ifnet *);
97 static void igi_delete_locked(const struct ifnet *);
98 static void igmp_dispatch_queue(struct mbufq *, int, const int);
99 static void igmp_fasttimo_vnet(void);
100 static void igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
101 static int igmp_handle_state_change(struct in_multi *,
102 struct igmp_ifsoftc *);
103 static int igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
104 static int igmp_input_v1_query(struct ifnet *, const struct ip *,
105 const struct igmp *);
106 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
107 const struct igmp *);
108 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
109 /*const*/ struct igmpv3 *);
110 static int igmp_input_v3_group_query(struct in_multi *,
111 struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
112 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
113 /*const*/ struct igmp *);
114 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
115 /*const*/ struct igmp *);
116 static void igmp_intr(struct mbuf *);
117 static int igmp_isgroupreported(const struct in_addr);
118 static struct mbuf *
119 igmp_ra_alloc(void);
120 #ifdef KTR
121 static char * igmp_rec_type_to_str(const int);
122 #endif
123 static void igmp_set_version(struct igmp_ifsoftc *, const int);
124 static void igmp_slowtimo_vnet(void);
125 static int igmp_v1v2_queue_report(struct in_multi *, const int);
126 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
127 static void igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
128 static void igmp_v2_update_group(struct in_multi *, const int);
129 static void igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
130 static void igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
131 static struct mbuf *
132 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
133 static int igmp_v3_enqueue_group_record(struct mbufq *,
134 struct in_multi *, const int, const int, const int);
135 static int igmp_v3_enqueue_filter_change(struct mbufq *,
136 struct in_multi *);
137 static void igmp_v3_process_group_timers(struct igmp_ifsoftc *,
138 struct mbufq *, struct mbufq *, struct in_multi *,
139 const int);
140 static int igmp_v3_merge_state_changes(struct in_multi *,
141 struct mbufq *);
142 static void igmp_v3_suppress_group_record(struct in_multi *);
143 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
144 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
145 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
146
147 static const struct netisr_handler igmp_nh = {
148 .nh_name = "igmp",
149 .nh_handler = igmp_intr,
150 .nh_proto = NETISR_IGMP,
151 .nh_policy = NETISR_POLICY_SOURCE,
152 };
153
154 /*
155 * System-wide globals.
156 *
157 * Unlocked access to these is OK, except for the global IGMP output
158 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
159 * because all VIMAGEs have to share a global output queue, as netisrs
160 * themselves are not virtualized.
161 *
162 * Locking:
163 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
164 * Any may be taken independently; if any are held at the same
165 * time, the above lock order must be followed.
166 * * All output is delegated to the netisr.
167 * Now that Giant has been eliminated, the netisr may be inlined.
168 * * IN_MULTI_LOCK covers in_multi.
169 * * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
170 * including the output queue.
171 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
172 * per-link state iterators.
173 * * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
174 * therefore it is not refcounted.
175 * We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
176 *
177 * Reference counting
178 * * IGMP acquires its own reference every time an in_multi is passed to
179 * it and the group is being joined for the first time.
180 * * IGMP releases its reference(s) on in_multi in a deferred way,
181 * because the operations which process the release run as part of
182 * a loop whose control variables are directly affected by the release
183 * (that, and not recursing on the IF_ADDR_LOCK).
184 *
185 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
186 * to a vnet in ifp->if_vnet.
187 *
188 * SMPng: XXX We may potentially race operations on ifma_protospec.
189 * The problem is that we currently lack a clean way of taking the
190 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
191 * as anything which modifies ifma needs to be covered by that lock.
192 * So check for ifma_protospec being NULL before proceeding.
193 */
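/*
 * In practice a locked section in this file typically looks like the
 * following sketch (compare e.g. sysctl_igmp_ifinfo() and the query
 * input paths below):
 *
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
 *	IF_ADDR_RLOCK(ifp);
 *	... walk ifp->if_multiaddrs, skipping entries whose
 *	    ifma_protospec is NULL, per the note above ...
 *	IF_ADDR_RUNLOCK(ifp);
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 */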
194 struct mtx igmp_mtx;
195
196 struct mbuf *m_raopt; /* Router Alert option */
197 static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
198
199 /*
200 * VIMAGE-wide globals.
201 *
202 * The IGMPv3 timers themselves need to run per-image; however,
203 * protosw timers run globally (see tcp).
204 * An ifnet can only be in one vimage at a time, and the loopback
205 * ifnet, loif, is itself virtualized.
206 * It would otherwise be possible to seriously hose IGMP state,
207 * and create inconsistencies in upstream multicast routing, if you have
208 * multiple VIMAGEs running on the same link joining different multicast
209 * groups, UNLESS the "primary IP address" is different. This is because
210 * IGMP for IPv4 does not force link-local addresses to be used for each
211 * node, unlike MLD for IPv6.
212 * Obviously the IGMPv3 per-interface state has per-vimage granularity
213 * also as a result.
214 *
215 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
216 * policy to control the address used by IGMP on the link.
217 */
218 static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general
219 * query response */
220 static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change
221 * retransmit */
222 static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host
223 * report; IGMPv3 g/sg
224 * query response */
225
226 #define V_interface_timers_running VNET(interface_timers_running)
227 #define V_state_change_timers_running VNET(state_change_timers_running)
228 #define V_current_state_timers_running VNET(current_state_timers_running)
229
230 static VNET_DEFINE(LIST_HEAD(, igmp_ifsoftc), igi_head) =
231 LIST_HEAD_INITIALIZER(igi_head);
232 static VNET_DEFINE(struct igmpstat, igmpstat) = {
233 .igps_version = IGPS_VERSION_3,
234 .igps_len = sizeof(struct igmpstat),
235 };
236 static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0};
237
238 #define V_igi_head VNET(igi_head)
239 #define V_igmpstat VNET(igmpstat)
240 #define V_igmp_gsrdelay VNET(igmp_gsrdelay)
241
242 static VNET_DEFINE(int, igmp_recvifkludge) = 1;
243 static VNET_DEFINE(int, igmp_sendra) = 1;
244 static VNET_DEFINE(int, igmp_sendlocal) = 1;
245 static VNET_DEFINE(int, igmp_v1enable) = 1;
246 static VNET_DEFINE(int, igmp_v2enable) = 1;
247 static VNET_DEFINE(int, igmp_legacysupp);
248 static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3;
249
250 #define V_igmp_recvifkludge VNET(igmp_recvifkludge)
251 #define V_igmp_sendra VNET(igmp_sendra)
252 #define V_igmp_sendlocal VNET(igmp_sendlocal)
253 #define V_igmp_v1enable VNET(igmp_v1enable)
254 #define V_igmp_v2enable VNET(igmp_v2enable)
255 #define V_igmp_legacysupp VNET(igmp_legacysupp)
256 #define V_igmp_default_version VNET(igmp_default_version)
257
258 /*
259 * Virtualized sysctls.
260 */
261 SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_VNET | CTLFLAG_RW,
262 &VNET_NAME(igmpstat), igmpstat, "");
263 SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
264 &VNET_NAME(igmp_recvifkludge), 0,
265 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
266 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
267 &VNET_NAME(igmp_sendra), 0,
268 "Send IP Router Alert option in IGMPv2/v3 messages");
269 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
270 &VNET_NAME(igmp_sendlocal), 0,
271 "Send IGMP membership reports for 224.0.0.0/24 groups");
272 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
273 &VNET_NAME(igmp_v1enable), 0,
274 "Enable backwards compatibility with IGMPv1");
275 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
276 &VNET_NAME(igmp_v2enable), 0,
277 "Enable backwards compatibility with IGMPv2");
278 SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
279 &VNET_NAME(igmp_legacysupp), 0,
280 "Allow v1/v2 reports to suppress v3 group responses");
281 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
282 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
283 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
284 "Default version of IGMP to run on each interface");
285 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
286 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
287 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
288 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
289
290 /*
291 * Non-virtualized sysctls.
292 */
293 static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
294 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
295 "Per-interface IGMPv3 state");
296
297 static __inline void
298 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
299 {
300
301 #ifdef VIMAGE
302 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
303 #endif /* VIMAGE */
304 m->m_pkthdr.flowid = ifp->if_index;
305 }
306
307 static __inline void
308 igmp_scrub_context(struct mbuf *m)
309 {
310
311 m->m_pkthdr.PH_loc.ptr = NULL;
312 m->m_pkthdr.flowid = 0;
313 }
314
315 /*
316 * Restore context from a queued IGMP output chain.
317 * Return saved ifindex.
318 *
319 * VIMAGE: The assertion is there to make sure that we
320 * actually called CURVNET_SET() with what's in the mbuf chain.
321 */
322 static __inline uint32_t
323 igmp_restore_context(struct mbuf *m)
324 {
325
326 #ifdef notyet
327 #if defined(VIMAGE) && defined(INVARIANTS)
328 KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
329 ("%s: called when curvnet was not restored", __func__));
330 #endif
331 #endif
332 return (m->m_pkthdr.flowid);
333 }
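/*
 * Taken together, igmp_save_context(), igmp_scrub_context() and
 * igmp_restore_context() let the transmitting ifindex (and, under
 * VIMAGE, the owning vnet) ride along in otherwise-unused pkthdr
 * fields while an mbuf sits on IGMP's private output queues; the
 * fields are expected to be scrubbed again before the packet
 * re-enters the IP output path.
 */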
334
335 /*
336 * Retrieve or set default IGMP version.
337 *
338 * VIMAGE: Assume curvnet set by caller.
339 * SMPng: NOTE: Serialized by IGMP lock.
340 */
341 static int
342 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
343 {
344 int error;
345 int new;
346
347 error = sysctl_wire_old_buffer(req, sizeof(int));
348 if (error)
349 return (error);
350
351 IGMP_LOCK();
352
353 new = V_igmp_default_version;
354
355 error = sysctl_handle_int(oidp, &new, 0, req);
356 if (error || !req->newptr)
357 goto out_locked;
358
359 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
360 error = EINVAL;
361 goto out_locked;
362 }
363
364 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
365 V_igmp_default_version, new);
366
367 V_igmp_default_version = new;
368
369 out_locked:
370 IGMP_UNLOCK();
371 return (error);
372 }
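/*
 * For example, "sysctl net.inet.igmp.default_version=2" makes newly
 * attached interfaces start in IGMPv2 mode; the value is read by
 * igi_alloc_locked(), so interfaces which already have an
 * igmp_ifsoftc keep their current version.
 */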
373
374 /*
375 * Retrieve or set threshold between group-source queries in seconds.
376 *
377 * VIMAGE: Assume curvnet set by caller.
378 * SMPng: NOTE: Serialized by IGMP lock.
379 */
380 static int
381 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
382 {
383 int error;
384 int i;
385
386 error = sysctl_wire_old_buffer(req, sizeof(int));
387 if (error)
388 return (error);
389
390 IGMP_LOCK();
391
392 i = V_igmp_gsrdelay.tv_sec;
393
394 error = sysctl_handle_int(oidp, &i, 0, req);
395 if (error || !req->newptr)
396 goto out_locked;
397
398 if (i < -1 || i >= 60) {
399 error = EINVAL;
400 goto out_locked;
401 }
402
403 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
404 V_igmp_gsrdelay.tv_sec, i);
405 V_igmp_gsrdelay.tv_sec = i;
406
407 out_locked:
408 IGMP_UNLOCK();
409 return (error);
410 }
411
412 /*
413 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
414 * For use by ifmcstat(8).
415 *
416 * SMPng: NOTE: Does an unlocked ifindex space read.
417 * VIMAGE: Assume curvnet set by caller. The node handler itself
418 * is not directly virtualized.
419 */
420 static int
421 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
422 {
423 int *name;
424 int error;
425 u_int namelen;
426 struct ifnet *ifp;
427 struct igmp_ifsoftc *igi;
428
429 name = (int *)arg1;
430 namelen = arg2;
431
432 if (req->newptr != NULL)
433 return (EPERM);
434
435 if (namelen != 1)
436 return (EINVAL);
437
438 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
439 if (error)
440 return (error);
441
442 IN_MULTI_LOCK();
443 IGMP_LOCK();
444
445 if (name[0] <= 0 || name[0] > V_if_index) {
446 error = ENOENT;
447 goto out_locked;
448 }
449
450 error = ENOENT;
451
452 ifp = ifnet_byindex(name[0]);
453 if (ifp == NULL)
454 goto out_locked;
455
456 LIST_FOREACH(igi, &V_igi_head, igi_link) {
457 if (ifp == igi->igi_ifp) {
458 struct igmp_ifinfo info;
459
460 info.igi_version = igi->igi_version;
461 info.igi_v1_timer = igi->igi_v1_timer;
462 info.igi_v2_timer = igi->igi_v2_timer;
463 info.igi_v3_timer = igi->igi_v3_timer;
464 info.igi_flags = igi->igi_flags;
465 info.igi_rv = igi->igi_rv;
466 info.igi_qi = igi->igi_qi;
467 info.igi_qri = igi->igi_qri;
468 info.igi_uri = igi->igi_uri;
469 error = SYSCTL_OUT(req, &info, sizeof(info));
470 break;
471 }
472 }
473
474 out_locked:
475 IGMP_UNLOCK();
476 IN_MULTI_UNLOCK();
477 return (error);
478 }
479
480 /*
481 * Dispatch an entire queue of pending packet chains
482 * using the netisr.
483 * VIMAGE: Assumes the vnet pointer has been set.
484 */
485 static void
486 igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
487 {
488 struct mbuf *m;
489
490 while ((m = mbufq_dequeue(mq)) != NULL) {
491 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
492 if (loop)
493 m->m_flags |= M_IGMP_LOOP;
494 netisr_dispatch(NETISR_IGMP, m);
495 if (--limit == 0)
496 break;
497 }
498 }
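/*
 * Note that a limit of 0 effectively means "no limit": the counter
 * is pre-decremented, so it can only terminate the loop after
 * counting down from a positive value. The fasttimo path relies on
 * this when draining its queues.
 */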
499
500 /*
501 * Filter outgoing IGMP report state by group.
502 *
503 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
504 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
505 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
506 * this may break certain IGMP snooping switches which rely on the old
507 * report behaviour.
508 *
509 * Return zero if the given group is one for which IGMP reports
510 * should be suppressed, or non-zero if reports should be issued.
511 */
512 static __inline int
513 igmp_isgroupreported(const struct in_addr addr)
514 {
515
516 if (in_allhosts(addr) ||
517 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
518 return (0);
519
520 return (1);
521 }
522
523 /*
524 * Construct a Router Alert option to use in outgoing packets.
525 */
526 static struct mbuf *
527 igmp_ra_alloc(void)
528 {
529 struct mbuf *m;
530 struct ipoption *p;
531
532 m = m_get(M_WAITOK, MT_DATA);
533 p = mtod(m, struct ipoption *);
534 p->ipopt_dst.s_addr = INADDR_ANY;
535 p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */
536 p->ipopt_list[1] = 0x04; /* 4 bytes long */
537 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
538 p->ipopt_list[3] = 0x00; /* pad byte */
539 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
540
541 return (m);
542 }
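/*
 * The mbuf constructed above carries the RFC 2113 Router Alert
 * option: the bytes 0x94 0x04 0x00 0x00, i.e. copy flag set,
 * option number 20, length 4, and the two-octet value 0
 * ("router shall examine packet"). ipopt_dst is INADDR_ANY as
 * no source route is in use.
 */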
543
544 /*
545 * Attach IGMP when PF_INET is attached to an interface.
546 */
547 struct igmp_ifsoftc *
548 igmp_domifattach(struct ifnet *ifp)
549 {
550 struct igmp_ifsoftc *igi;
551
552 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
553 __func__, ifp, ifp->if_xname);
554
555 IGMP_LOCK();
556
557 igi = igi_alloc_locked(ifp);
558 if (!(ifp->if_flags & IFF_MULTICAST))
559 igi->igi_flags |= IGIF_SILENT;
560
561 IGMP_UNLOCK();
562
563 return (igi);
564 }
565
566 /*
567 * VIMAGE: assume curvnet set by caller.
568 */
569 static struct igmp_ifsoftc *
570 igi_alloc_locked(/*const*/ struct ifnet *ifp)
571 {
572 struct igmp_ifsoftc *igi;
573
574 IGMP_LOCK_ASSERT();
575
576 igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
577 if (igi == NULL)
578 goto out;
579
580 igi->igi_ifp = ifp;
581 igi->igi_version = V_igmp_default_version;
582 igi->igi_flags = 0;
583 igi->igi_rv = IGMP_RV_INIT;
584 igi->igi_qi = IGMP_QI_INIT;
585 igi->igi_qri = IGMP_QRI_INIT;
586 igi->igi_uri = IGMP_URI_INIT;
587 SLIST_INIT(&igi->igi_relinmhead);
588 mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
589
590 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
591
592 CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
593 ifp, ifp->if_xname);
594
595 out:
596 return (igi);
597 }
598
599 /*
600 * Hook for ifdetach.
601 *
602 * NOTE: Some finalization tasks need to run before the protocol domain
603 * is detached, but also before the link layer does its cleanup.
604 *
605 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
606 * XXX This is also bitten by unlocked ifma_protospec access.
607 */
608 void
609 igmp_ifdetach(struct ifnet *ifp)
610 {
611 struct igmp_ifsoftc *igi;
612 struct ifmultiaddr *ifma;
613 struct in_multi *inm, *tinm;
614
615 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
616 ifp->if_xname);
617
618 IGMP_LOCK();
619
620 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
621 if (igi->igi_version == IGMP_VERSION_3) {
622 IF_ADDR_RLOCK(ifp);
623 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
624 if (ifma->ifma_addr->sa_family != AF_INET ||
625 ifma->ifma_protospec == NULL)
626 continue;
627 #if 0
628 KASSERT(ifma->ifma_protospec != NULL,
629 ("%s: ifma_protospec is NULL", __func__));
630 #endif
631 inm = (struct in_multi *)ifma->ifma_protospec;
632 if (inm->inm_state == IGMP_LEAVING_MEMBER) {
633 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
634 inm, inm_nrele);
635 }
636 inm_clear_recorded(inm);
637 }
638 IF_ADDR_RUNLOCK(ifp);
639 /*
640 * Free the in_multi reference(s) for this IGMP lifecycle.
641 */
642 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
643 tinm) {
644 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
645 inm_release_locked(inm);
646 }
647 }
648
649 IGMP_UNLOCK();
650 }
651
652 /*
653 * Hook for domifdetach.
654 */
655 void
656 igmp_domifdetach(struct ifnet *ifp)
657 {
658
659 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
660 __func__, ifp, ifp->if_xname);
661
662 IGMP_LOCK();
663 igi_delete_locked(ifp);
664 IGMP_UNLOCK();
665 }
666
667 static void
668 igi_delete_locked(const struct ifnet *ifp)
669 {
670 struct igmp_ifsoftc *igi, *tigi;
671
672 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
673 __func__, ifp, ifp->if_xname);
674
675 IGMP_LOCK_ASSERT();
676
677 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
678 if (igi->igi_ifp == ifp) {
679 /*
680 * Free deferred General Query responses.
681 */
682 mbufq_drain(&igi->igi_gq);
683
684 LIST_REMOVE(igi, igi_link);
685
686 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
687 ("%s: there are dangling in_multi references",
688 __func__));
689
690 free(igi, M_IGMP);
691 return;
692 }
693 }
694 }
695
696 /*
697 * Process a received IGMPv1 query.
698 * Return non-zero if the message should be dropped.
699 *
700 * VIMAGE: The curvnet pointer is derived from the input ifp.
701 */
702 static int
703 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
704 const struct igmp *igmp)
705 {
706 struct ifmultiaddr *ifma;
707 struct igmp_ifsoftc *igi;
708 struct in_multi *inm;
709
710 /*
711 * IGMPv1 Host Membership Queries SHOULD always be addressed to
712 * 224.0.0.1. They are always treated as General Queries.
713 * igmp_group is always ignored. Do not drop it as a userland
714 * daemon may wish to see it.
715 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
716 */
717 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
718 IGMPSTAT_INC(igps_rcv_badqueries);
719 return (0);
720 }
721 IGMPSTAT_INC(igps_rcv_gen_queries);
722
723 IN_MULTI_LOCK();
724 IGMP_LOCK();
725
726 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
727 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
728
729 if (igi->igi_flags & IGIF_LOOPBACK) {
730 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
731 ifp, ifp->if_xname);
732 goto out_locked;
733 }
734
735 /*
736 * Switch to IGMPv1 host compatibility mode.
737 */
738 igmp_set_version(igi, IGMP_VERSION_1);
739
740 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
741
742 /*
743 * Start the timers in all of our group records
744 * for the interface on which the query arrived,
745 * except those which are already running.
746 */
747 IF_ADDR_RLOCK(ifp);
748 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
749 if (ifma->ifma_addr->sa_family != AF_INET ||
750 ifma->ifma_protospec == NULL)
751 continue;
752 inm = (struct in_multi *)ifma->ifma_protospec;
753 if (inm->inm_timer != 0)
754 continue;
755 switch (inm->inm_state) {
756 case IGMP_NOT_MEMBER:
757 case IGMP_SILENT_MEMBER:
758 break;
759 case IGMP_G_QUERY_PENDING_MEMBER:
760 case IGMP_SG_QUERY_PENDING_MEMBER:
761 case IGMP_REPORTING_MEMBER:
762 case IGMP_IDLE_MEMBER:
763 case IGMP_LAZY_MEMBER:
764 case IGMP_SLEEPING_MEMBER:
765 case IGMP_AWAKENING_MEMBER:
766 inm->inm_state = IGMP_REPORTING_MEMBER;
767 inm->inm_timer = IGMP_RANDOM_DELAY(
768 IGMP_V1V2_MAX_RI * PR_FASTHZ);
769 V_current_state_timers_running = 1;
770 break;
771 case IGMP_LEAVING_MEMBER:
772 break;
773 }
774 }
775 IF_ADDR_RUNLOCK(ifp);
776
777 out_locked:
778 IGMP_UNLOCK();
779 IN_MULTI_UNLOCK();
780
781 return (0);
782 }
783
784 /*
785 * Process a received IGMPv2 general or group-specific query.
786 */
787 static int
788 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
789 const struct igmp *igmp)
790 {
791 struct ifmultiaddr *ifma;
792 struct igmp_ifsoftc *igi;
793 struct in_multi *inm;
794 int is_general_query;
795 uint16_t timer;
796
797 is_general_query = 0;
798
799 /*
800 * Validate address fields upfront.
801 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
802 */
803 if (in_nullhost(igmp->igmp_group)) {
804 /*
805 * IGMPv2 General Query.
806 * If this was not sent to the all-hosts group, ignore it.
807 */
808 if (!in_allhosts(ip->ip_dst))
809 return (0);
810 IGMPSTAT_INC(igps_rcv_gen_queries);
811 is_general_query = 1;
812 } else {
813 /* IGMPv2 Group-Specific Query. */
814 IGMPSTAT_INC(igps_rcv_group_queries);
815 }
816
817 IN_MULTI_LOCK();
818 IGMP_LOCK();
819
820 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
821 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
822
823 if (igi->igi_flags & IGIF_LOOPBACK) {
824 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
825 ifp, ifp->if_xname);
826 goto out_locked;
827 }
828
829 /*
830 * Ignore v2 query if in v1 Compatibility Mode.
831 */
832 if (igi->igi_version == IGMP_VERSION_1)
833 goto out_locked;
834
835 igmp_set_version(igi, IGMP_VERSION_2);
836
837 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
838 if (timer == 0)
839 timer = 1;
840
841 if (is_general_query) {
842 /*
843 * For each reporting group joined on this
844 * interface, kick the report timer.
845 */
846 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
847 ifp, ifp->if_xname);
848 IF_ADDR_RLOCK(ifp);
849 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
850 if (ifma->ifma_addr->sa_family != AF_INET ||
851 ifma->ifma_protospec == NULL)
852 continue;
853 inm = (struct in_multi *)ifma->ifma_protospec;
854 igmp_v2_update_group(inm, timer);
855 }
856 IF_ADDR_RUNLOCK(ifp);
857 } else {
858 /*
859 * Group-specific IGMPv2 query, we need only
860 * look up the single group to process it.
861 */
862 inm = inm_lookup(ifp, igmp->igmp_group);
863 if (inm != NULL) {
864 CTR3(KTR_IGMPV3,
865 "process v2 query 0x%08x on ifp %p(%s)",
866 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
867 igmp_v2_update_group(inm, timer);
868 }
869 }
870
871 out_locked:
872 IGMP_UNLOCK();
873 IN_MULTI_UNLOCK();
874
875 return (0);
876 }
877
878 /*
879 * Update the report timer on a group in response to an IGMPv2 query.
880 *
881 * If we are becoming the reporting member for this group, start the timer.
882 * If we already are the reporting member for this group, and timer is
883 * below the threshold, reset it.
884 *
885 * We may be updating the group for the first time since we switched
886 * to IGMPv3. If we are, then we must clear any recorded source lists,
887 * and transition to REPORTING state; the group timer is overloaded
888 * for group and group-source query responses.
889 *
890 * Unlike IGMPv3, the delay per group should be jittered
891 * to avoid bursts of IGMPv2 reports.
892 */
893 static void
894 igmp_v2_update_group(struct in_multi *inm, const int timer)
895 {
896
897 CTR4(KTR_IGMPV3, "%s: 0x%08x/%s timer=%d", __func__,
898 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
899
900 IN_MULTI_LOCK_ASSERT();
901
902 switch (inm->inm_state) {
903 case IGMP_NOT_MEMBER:
904 case IGMP_SILENT_MEMBER:
905 break;
906 case IGMP_REPORTING_MEMBER:
907 if (inm->inm_timer != 0 &&
908 inm->inm_timer <= timer) {
909 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
910 "skipping.", __func__);
911 break;
912 }
913 /* FALLTHROUGH */
914 case IGMP_SG_QUERY_PENDING_MEMBER:
915 case IGMP_G_QUERY_PENDING_MEMBER:
916 case IGMP_IDLE_MEMBER:
917 case IGMP_LAZY_MEMBER:
918 case IGMP_AWAKENING_MEMBER:
919 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
920 inm->inm_state = IGMP_REPORTING_MEMBER;
921 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
922 V_current_state_timers_running = 1;
923 break;
924 case IGMP_SLEEPING_MEMBER:
925 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
926 inm->inm_state = IGMP_AWAKENING_MEMBER;
927 break;
928 case IGMP_LEAVING_MEMBER:
929 break;
930 }
931 }
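/*
 * (IGMP_RANDOM_DELAY(X) draws a pseudo-random delay in the range
 * [1, X] fast ticks, which provides the per-host jitter called for
 * above.)
 */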
932
933 /*
934 * Process a received IGMPv3 general, group-specific or
935 * group-and-source-specific query.
936 * Assumes m has already been pulled up to the full IGMP message length.
937 * Return 0 if successful, otherwise an appropriate error code is returned.
938 */
939 static int
940 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
941 /*const*/ struct igmpv3 *igmpv3)
942 {
943 struct igmp_ifsoftc *igi;
944 struct in_multi *inm;
945 int is_general_query;
946 uint32_t maxresp, nsrc, qqi;
947 uint16_t timer;
948 uint8_t qrv;
949
950 is_general_query = 0;
951
952 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
953
954 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
955 if (maxresp >= 128) {
956 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
957 (IGMP_EXP(igmpv3->igmp_code) + 3);
958 }
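/*
 * Codes >= 128 use the floating-point encoding of RFC 3376
 * Section 4.1.1: 1 | exp (3 bits) | mant (4 bits), giving
 * Max Resp Time = (mant | 0x10) << (exp + 3) in tenths of a
 * second; IGMP_MANT() and IGMP_EXP() extract the encoded fields.
 * E.g. a code of 0xEF (exp 6, mant 0xF) decodes to
 * 0x1F << 9 = 15872 tenths, roughly 26 minutes. The same
 * encoding applies to the QQIC field handled below.
 */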
959
960 /*
961 * Robustness must never be less than 2 for on-wire IGMPv3.
962 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
963 * an exception for interfaces whose IGMPv3 state changes
964 * are redirected to loopback (e.g. MANET).
965 */
966 qrv = IGMP_QRV(igmpv3->igmp_misc);
967 if (qrv < 2) {
968 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
969 qrv, IGMP_RV_INIT);
970 qrv = IGMP_RV_INIT;
971 }
972
973 qqi = igmpv3->igmp_qqi;
974 if (qqi >= 128) {
975 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
976 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
977 }
978
979 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
980 if (timer == 0)
981 timer = 1;
982
983 nsrc = ntohs(igmpv3->igmp_numsrc);
984
985 /*
986 * Validate address fields and versions upfront before
987 * accepting v3 query.
988 * XXX SMPng: Unlocked access to igmpstat counters here.
989 */
990 if (in_nullhost(igmpv3->igmp_group)) {
991 /*
992 * IGMPv3 General Query.
993 *
994 * General Queries SHOULD be directed to 224.0.0.1.
995 * A general query with a source list has undefined
996 * behaviour; discard it.
997 */
998 IGMPSTAT_INC(igps_rcv_gen_queries);
999 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
1000 IGMPSTAT_INC(igps_rcv_badqueries);
1001 return (0);
1002 }
1003 is_general_query = 1;
1004 } else {
1005 /* Group or group-source specific query. */
1006 if (nsrc == 0)
1007 IGMPSTAT_INC(igps_rcv_group_queries);
1008 else
1009 IGMPSTAT_INC(igps_rcv_gsr_queries);
1010 }
1011
1012 IN_MULTI_LOCK();
1013 IGMP_LOCK();
1014
1015 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1016 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
1017
1018 if (igi->igi_flags & IGIF_LOOPBACK) {
1019 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1020 ifp, ifp->if_xname);
1021 goto out_locked;
1022 }
1023
1024 /*
1025 * Discard the v3 query if we're in Compatibility Mode.
1026 * The RFC wording is not entirely clear on whether hosts need to
1027 * stay in compatibility mode until the Old Version Querier Present
1028 * timer expires.
1029 */
1030 if (igi->igi_version != IGMP_VERSION_3) {
1031 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1032 igi->igi_version, ifp, ifp->if_xname);
1033 goto out_locked;
1034 }
1035
1036 igmp_set_version(igi, IGMP_VERSION_3);
1037 igi->igi_rv = qrv;
1038 igi->igi_qi = qqi;
1039 igi->igi_qri = maxresp;
1040
1041 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1042 maxresp);
1043
1044 if (is_general_query) {
1045 /*
1046 * Schedule a current-state report on this ifp for
1047 * all groups, possibly containing source lists.
1048 * If there is a pending General Query response
1049 * scheduled earlier than the selected delay, do
1050 * not schedule any other reports.
1051 * Otherwise, reset the interface timer.
1052 */
1053 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1054 ifp, ifp->if_xname);
1055 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1056 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1057 V_interface_timers_running = 1;
1058 }
1059 } else {
1060 /*
1061 * Group-source-specific queries are throttled on
1062 * a per-group basis to defeat denial-of-service attempts.
1063 * Queries for groups we are not a member of on this
1064 * link are simply ignored.
1065 */
1066 inm = inm_lookup(ifp, igmpv3->igmp_group);
1067 if (inm == NULL)
1068 goto out_locked;
1069 if (nsrc > 0) {
1070 if (!ratecheck(&inm->inm_lastgsrtv,
1071 &V_igmp_gsrdelay)) {
1072 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1073 __func__);
1074 IGMPSTAT_INC(igps_drop_gsr_queries);
1075 goto out_locked;
1076 }
1077 }
1078 CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
1079 ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
1080 /*
1081 * If there is a pending General Query response
1082 * scheduled sooner than the selected delay, no
1083 * further report need be scheduled.
1084 * Otherwise, prepare to respond to the
1085 * group-specific or group-and-source query.
1086 */
1087 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1088 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1089 }
1090
1091 out_locked:
1092 IGMP_UNLOCK();
1093 IN_MULTI_UNLOCK();
1094
1095 return (0);
1096 }
1097
1098 /*
1099 * Process a received IGMPv3 group-specific or group-and-source-specific
1100 * query.
1101 * Return <0 if any error occurred. Currently this is ignored.
1102 */
1103 static int
1104 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
1105 int timer, /*const*/ struct igmpv3 *igmpv3)
1106 {
1107 int retval;
1108 uint16_t nsrc;
1109
1110 IN_MULTI_LOCK_ASSERT();
1111 IGMP_LOCK_ASSERT();
1112
1113 retval = 0;
1114
1115 switch (inm->inm_state) {
1116 case IGMP_NOT_MEMBER:
1117 case IGMP_SILENT_MEMBER:
1118 case IGMP_SLEEPING_MEMBER:
1119 case IGMP_LAZY_MEMBER:
1120 case IGMP_AWAKENING_MEMBER:
1121 case IGMP_IDLE_MEMBER:
1122 case IGMP_LEAVING_MEMBER:
1123 return (retval);
1124 break;
1125 case IGMP_REPORTING_MEMBER:
1126 case IGMP_G_QUERY_PENDING_MEMBER:
1127 case IGMP_SG_QUERY_PENDING_MEMBER:
1128 break;
1129 }
1130
1131 nsrc = ntohs(igmpv3->igmp_numsrc);
1132
1133 /*
1134 * Deal with group-specific queries upfront.
1135 * If any group query is already pending, purge any recorded
1136 * source-list state if it exists, and schedule a query response
1137 * for this group-specific query.
1138 */
1139 if (nsrc == 0) {
1140 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1141 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1142 inm_clear_recorded(inm);
1143 timer = min(inm->inm_timer, timer);
1144 }
1145 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1146 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1147 V_current_state_timers_running = 1;
1148 return (retval);
1149 }
1150
1151 /*
1152 * Deal with the case where a group-and-source-specific query has
1153 * been received but a group-specific query is already pending.
1154 */
1155 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1156 timer = min(inm->inm_timer, timer);
1157 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1158 V_current_state_timers_running = 1;
1159 return (retval);
1160 }
1161
1162 /*
1163 * Finally, deal with the case where a group-and-source-specific
1164 * query has been received, where a response to a previous g-s-r
1165 * query exists, or none exists.
1166 * In this case, we need to parse the source-list which the Querier
1167 * has provided us with and check if we have any source list filter
1168 * entries at T1 for these sources. If we do not, there is no need to
1169 * schedule a report and the query may be dropped.
1170 * If we do, we must record them and schedule a current-state
1171 * report for those sources.
1172 * FIXME: Handling source lists larger than 1 mbuf requires that
1173 * we pass the mbuf chain pointer down to this function, and use
1174 * m_getptr() to walk the chain.
1175 */
1176 if (inm->inm_nsrc > 0) {
1177 const struct in_addr *ap;
1178 int i, nrecorded;
1179
1180 ap = (const struct in_addr *)(igmpv3 + 1);
1181 nrecorded = 0;
1182 for (i = 0; i < nsrc; i++, ap++) {
1183 retval = inm_record_source(inm, ap->s_addr);
1184 if (retval < 0)
1185 break;
1186 nrecorded += retval;
1187 }
1188 if (nrecorded > 0) {
1189 CTR1(KTR_IGMPV3,
1190 "%s: schedule response to SG query", __func__);
1191 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1192 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1193 V_current_state_timers_running = 1;
1194 }
1195 }
1196
1197 return (retval);
1198 }
1199
1200 /*
1201 * Process a received IGMPv1 host membership report.
1202 *
1203 * NOTE: 0.0.0.0 workaround breaks const correctness.
1204 */
1205 static int
1206 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1207 /*const*/ struct igmp *igmp)
1208 {
1209 struct rm_priotracker in_ifa_tracker;
1210 struct in_ifaddr *ia;
1211 struct in_multi *inm;
1212
1213 IGMPSTAT_INC(igps_rcv_reports);
1214
1215 if (ifp->if_flags & IFF_LOOPBACK)
1216 return (0);
1217
1218 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1219 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1220 IGMPSTAT_INC(igps_rcv_badreports);
1221 return (EINVAL);
1222 }
1223
1224 /*
1225 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1226 * Booting clients may use the source address 0.0.0.0. Some
1227 * IGMP daemons may not know how to use IP_RECVIF to determine
1228 * the interface upon which this message was received.
1229 * Replace 0.0.0.0 with the subnet address if told to do so.
1230 */
1231 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1232 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1233 if (ia != NULL) {
1234 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1235 ifa_free(&ia->ia_ifa);
1236 }
1237 }
1238
1239 CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
1240 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1241
1242 /*
1243 * IGMPv1 report suppression.
1244 * If we are a member of this group, and our membership should be
1245 * reported, stop our group timer and transition to the 'lazy' state.
1246 */
1247 IN_MULTI_LOCK();
1248 inm = inm_lookup(ifp, igmp->igmp_group);
1249 if (inm != NULL) {
1250 struct igmp_ifsoftc *igi;
1251
1252 igi = inm->inm_igi;
1253 if (igi == NULL) {
1254 KASSERT(igi != NULL,
1255 ("%s: no igi for ifp %p", __func__, ifp));
1256 goto out_locked;
1257 }
1258
1259 IGMPSTAT_INC(igps_rcv_ourreports);
1260
1261 /*
1262 * If we are in IGMPv3 host mode, do not allow the
1263 * other host's IGMPv1 report to suppress our reports
1264 * unless explicitly configured to do so.
1265 */
1266 if (igi->igi_version == IGMP_VERSION_3) {
1267 if (V_igmp_legacysupp)
1268 igmp_v3_suppress_group_record(inm);
1269 goto out_locked;
1270 }
1271
1272 inm->inm_timer = 0;
1273
1274 switch (inm->inm_state) {
1275 case IGMP_NOT_MEMBER:
1276 case IGMP_SILENT_MEMBER:
1277 break;
1278 case IGMP_IDLE_MEMBER:
1279 case IGMP_LAZY_MEMBER:
1280 case IGMP_AWAKENING_MEMBER:
1281 CTR3(KTR_IGMPV3,
1282 "report suppressed for 0x%08x on ifp %p(%s)",
1283 ntohl(igmp->igmp_group.s_addr), ifp,
1284 ifp->if_xname);
1285 case IGMP_SLEEPING_MEMBER:
1286 inm->inm_state = IGMP_SLEEPING_MEMBER;
1287 break;
1288 case IGMP_REPORTING_MEMBER:
1289 CTR3(KTR_IGMPV3,
1290 "report suppressed for 0x%08x on ifp %p(%s)",
1291 ntohl(igmp->igmp_group.s_addr), ifp,
1292 ifp->if_xname);
1293 if (igi->igi_version == IGMP_VERSION_1)
1294 inm->inm_state = IGMP_LAZY_MEMBER;
1295 else if (igi->igi_version == IGMP_VERSION_2)
1296 inm->inm_state = IGMP_SLEEPING_MEMBER;
1297 break;
1298 case IGMP_G_QUERY_PENDING_MEMBER:
1299 case IGMP_SG_QUERY_PENDING_MEMBER:
1300 case IGMP_LEAVING_MEMBER:
1301 break;
1302 }
1303 }
1304
1305 out_locked:
1306 IN_MULTI_UNLOCK();
1307
1308 return (0);
1309 }
1310
1311 /*
1312 * Process a received IGMPv2 host membership report.
1313 *
1314 * NOTE: 0.0.0.0 workaround breaks const correctness.
1315 */
1316 static int
1317 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1318 /*const*/ struct igmp *igmp)
1319 {
1320 struct rm_priotracker in_ifa_tracker;
1321 struct in_ifaddr *ia;
1322 struct in_multi *inm;
1323
1324 /*
1325 * Make sure we don't hear our own membership report. Fast
1326 * leave requires knowing that we are the only member of a
1327 * group.
1328 */
1329 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1330 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1331 ifa_free(&ia->ia_ifa);
1332 return (0);
1333 }
1334
1335 IGMPSTAT_INC(igps_rcv_reports);
1336
1337 if (ifp->if_flags & IFF_LOOPBACK) {
1338 if (ia != NULL)
1339 ifa_free(&ia->ia_ifa);
1340 return (0);
1341 }
1342
1343 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1344 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1345 if (ia != NULL)
1346 ifa_free(&ia->ia_ifa);
1347 IGMPSTAT_INC(igps_rcv_badreports);
1348 return (EINVAL);
1349 }
1350
1351 /*
1352 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1353 * Booting clients may use the source address 0.0.0.0. Some
1354 * IGMP daemons may not know how to use IP_RECVIF to determine
1355 * the interface upon which this message was received.
1356 * Replace 0.0.0.0 with the subnet address if told to do so.
1357 */
1358 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1359 if (ia != NULL)
1360 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1361 }
1362 if (ia != NULL)
1363 ifa_free(&ia->ia_ifa);
1364
1365 CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
1366 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1367
1368 /*
1369 * IGMPv2 report suppression.
1370 * If we are a member of this group, and our membership should be
1371 * reported, and our group timer is pending or about to be reset,
1372 * stop our group timer by transitioning to the 'lazy' state.
1373 */
1374 IN_MULTI_LOCK();
1375 inm = inm_lookup(ifp, igmp->igmp_group);
1376 if (inm != NULL) {
1377 struct igmp_ifsoftc *igi;
1378
1379 igi = inm->inm_igi;
1380 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1381
1382 IGMPSTAT_INC(igps_rcv_ourreports);
1383
1384 /*
1385 * If we are in IGMPv3 host mode, do not allow the
1386 * other host's IGMPv2 report to suppress our reports
1387 * unless explicitly configured to do so.
1388 */
1389 if (igi->igi_version == IGMP_VERSION_3) {
1390 if (V_igmp_legacysupp)
1391 igmp_v3_suppress_group_record(inm);
1392 goto out_locked;
1393 }
1394
1395 inm->inm_timer = 0;
1396
1397 switch (inm->inm_state) {
1398 case IGMP_NOT_MEMBER:
1399 case IGMP_SILENT_MEMBER:
1400 case IGMP_SLEEPING_MEMBER:
1401 break;
1402 case IGMP_REPORTING_MEMBER:
1403 case IGMP_IDLE_MEMBER:
1404 case IGMP_AWAKENING_MEMBER:
1405 CTR3(KTR_IGMPV3,
1406 "report suppressed for 0x%08x on ifp %p(%s)",
1407 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1408 case IGMP_LAZY_MEMBER:
1409 inm->inm_state = IGMP_LAZY_MEMBER;
1410 break;
1411 case IGMP_G_QUERY_PENDING_MEMBER:
1412 case IGMP_SG_QUERY_PENDING_MEMBER:
1413 case IGMP_LEAVING_MEMBER:
1414 break;
1415 }
1416 }
1417
1418 out_locked:
1419 IN_MULTI_UNLOCK();
1420
1421 return (0);
1422 }
1423
1424 int
1425 igmp_input(struct mbuf **mp, int *offp, int proto)
1426 {
1427 int iphlen;
1428 struct ifnet *ifp;
1429 struct igmp *igmp;
1430 struct ip *ip;
1431 struct mbuf *m;
1432 int igmplen;
1433 int minlen;
1434 int queryver;
1435
1436 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);
1437
1438 m = *mp;
1439 ifp = m->m_pkthdr.rcvif;
1440 *mp = NULL;
1441
1442 IGMPSTAT_INC(igps_rcv_total);
1443
1444 ip = mtod(m, struct ip *);
1445 iphlen = *offp;
1446 igmplen = ntohs(ip->ip_len) - iphlen;
1447
1448 /*
1449 * Validate lengths.
1450 */
1451 if (igmplen < IGMP_MINLEN) {
1452 IGMPSTAT_INC(igps_rcv_tooshort);
1453 m_freem(m);
1454 return (IPPROTO_DONE);
1455 }
1456
1457 /*
1458 * Always pullup to the minimum size for v1/v2 or v3
1459 * to amortize calls to m_pullup().
1460 */
1461 minlen = iphlen;
1462 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1463 minlen += IGMP_V3_QUERY_MINLEN;
1464 else
1465 minlen += IGMP_MINLEN;
1466 if ((!M_WRITABLE(m) || m->m_len < minlen) &&
1467 (m = m_pullup(m, minlen)) == NULL) {
1468 IGMPSTAT_INC(igps_rcv_tooshort);
1469 return (IPPROTO_DONE);
1470 }
1471 ip = mtod(m, struct ip *);
1472
1473 /*
1474 * Validate checksum.
1475 */
1476 m->m_data += iphlen;
1477 m->m_len -= iphlen;
1478 igmp = mtod(m, struct igmp *);
1479 if (in_cksum(m, igmplen)) {
1480 IGMPSTAT_INC(igps_rcv_badsum);
1481 m_freem(m);
1482 return (IPPROTO_DONE);
1483 }
1484 m->m_data -= iphlen;
1485 m->m_len += iphlen;
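/*
 * (The data pointer was advanced above so that in_cksum()
 * covers only the IGMP message, then rewound so that the IP
 * header remains visible to the per-version handlers below.)
 */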
1486
1487 /*
1488 * IGMP control traffic is link-scope, and must have a TTL of 1.
1489 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1490 * probe packets may come from beyond the LAN.
1491 */
1492 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1493 IGMPSTAT_INC(igps_rcv_badttl);
1494 m_freem(m);
1495 return (IPPROTO_DONE);
1496 }
1497
1498 switch (igmp->igmp_type) {
1499 case IGMP_HOST_MEMBERSHIP_QUERY:
1500 if (igmplen == IGMP_MINLEN) {
1501 if (igmp->igmp_code == 0)
1502 queryver = IGMP_VERSION_1;
1503 else
1504 queryver = IGMP_VERSION_2;
1505 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1506 queryver = IGMP_VERSION_3;
1507 } else {
1508 IGMPSTAT_INC(igps_rcv_tooshort);
1509 m_freem(m);
1510 return (IPPROTO_DONE);
1511 }
1512
1513 switch (queryver) {
1514 case IGMP_VERSION_1:
1515 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1516 if (!V_igmp_v1enable)
1517 break;
1518 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1519 m_freem(m);
1520 return (IPPROTO_DONE);
1521 }
1522 break;
1523
1524 case IGMP_VERSION_2:
1525 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1526 if (!V_igmp_v2enable)
1527 break;
1528 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1529 m_freem(m);
1530 return (IPPROTO_DONE);
1531 }
1532 break;
1533
1534 case IGMP_VERSION_3: {
1535 struct igmpv3 *igmpv3;
1536 uint16_t igmpv3len;
1537 uint16_t nsrc;
1538
1539 IGMPSTAT_INC(igps_rcv_v3_queries);
1540 igmpv3 = (struct igmpv3 *)igmp;
1541 /*
1542 * Validate length based on source count.
1543 */
1544 nsrc = ntohs(igmpv3->igmp_numsrc);
1545 if (nsrc * sizeof(in_addr_t) >
1546 UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
1547 IGMPSTAT_INC(igps_rcv_tooshort);
1548 m_freem(m); return (IPPROTO_DONE);
1549 }
1550 /*
1551 * m_pullup() may modify m, so pullup in
1552 * this scope.
1553 */
1554 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1555 sizeof(struct in_addr) * nsrc;
1556 if ((!M_WRITABLE(m) ||
1557 m->m_len < igmpv3len) &&
1558 (m = m_pullup(m, igmpv3len)) == NULL) {
1559 IGMPSTAT_INC(igps_rcv_tooshort);
1560 return (IPPROTO_DONE);
1561 }
1562 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1563 + iphlen);
1564 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1565 m_freem(m);
1566 return (IPPROTO_DONE);
1567 }
1568 }
1569 break;
1570 }
1571 break;
1572
1573 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1574 if (!V_igmp_v1enable)
1575 break;
1576 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1577 m_freem(m);
1578 return (IPPROTO_DONE);
1579 }
1580 break;
1581
1582 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1583 if (!V_igmp_v2enable)
1584 break;
1585 if (!ip_checkrouteralert(m))
1586 IGMPSTAT_INC(igps_rcv_nora);
1587 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1588 m_freem(m);
1589 return (IPPROTO_DONE);
1590 }
1591 break;
1592
1593 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1594 /*
1595 * Hosts do not need to process IGMPv3 membership reports,
1596 * as report suppression is no longer required.
1597 */
1598 if (!ip_checkrouteralert(m))
1599 IGMPSTAT_INC(igps_rcv_nora);
1600 break;
1601
1602 default:
1603 break;
1604 }
1605
1606 /*
1607 * Pass all valid IGMP packets up to any process(es) listening on a
1608 * raw IGMP socket.
1609 */
1610 *mp = m;
1611 return (rip_input(mp, offp, proto));
1612 }
1613
1614
1615 /*
1616 * Fast timeout handler (global).
1617 * VIMAGE: Timeout handlers are expected to service all vimages.
1618 */
1619 void
1620 igmp_fasttimo(void)
1621 {
1622 VNET_ITERATOR_DECL(vnet_iter);
1623
1624 VNET_LIST_RLOCK_NOSLEEP();
1625 VNET_FOREACH(vnet_iter) {
1626 CURVNET_SET(vnet_iter);
1627 igmp_fasttimo_vnet();
1628 CURVNET_RESTORE();
1629 }
1630 VNET_LIST_RUNLOCK_NOSLEEP();
1631 }
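/*
 * (Fast timeouts fire PR_FASTHZ times per second -- 5 on FreeBSD --
 * so the inm/igi timers manipulated below count in 1/5 second
 * ticks; the slow path, used for the v1/v2 querier-present timers,
 * runs at PR_SLOWHZ = 2.)
 */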
1632
1633 /*
1634 * Fast timeout handler (per-vnet).
1635 * Sends are shuffled off to a netisr to deal with Giant.
1636 *
1637 * VIMAGE: Assume caller has set up our curvnet.
1638 */
1639 static void
1640 igmp_fasttimo_vnet(void)
1641 {
1642 struct mbufq scq; /* State-change packets */
1643 struct mbufq qrq; /* Query response packets */
1644 struct ifnet *ifp;
1645 struct igmp_ifsoftc *igi;
1646 struct ifmultiaddr *ifma;
1647 struct in_multi *inm;
1648 int loop, uri_fasthz;
1649
1650 loop = 0;
1651 uri_fasthz = 0;
1652
1653 /*
1654 * Quick check to see if any work needs to be done, in order to
1655 * minimize the overhead of fasttimo processing.
1656 * SMPng: XXX Unlocked reads.
1657 */
1658 if (!V_current_state_timers_running &&
1659 !V_interface_timers_running &&
1660 !V_state_change_timers_running)
1661 return;
1662
1663 IN_MULTI_LOCK();
1664 IGMP_LOCK();
1665
1666 /*
1667 * IGMPv3 General Query response timer processing.
1668 */
1669 if (V_interface_timers_running) {
1670 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1671
1672 V_interface_timers_running = 0;
1673 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1674 if (igi->igi_v3_timer == 0) {
1675 /* Do nothing. */
1676 } else if (--igi->igi_v3_timer == 0) {
1677 igmp_v3_dispatch_general_query(igi);
1678 } else {
1679 V_interface_timers_running = 1;
1680 }
1681 }
1682 }
1683
1684 if (!V_current_state_timers_running &&
1685 !V_state_change_timers_running)
1686 goto out_locked;
1687
1688 V_current_state_timers_running = 0;
1689 V_state_change_timers_running = 0;
1690
1691 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1692
1693 /*
1694 * IGMPv1/v2/v3 host report and state-change timer processing.
1695 * Note: Processing a v3 group timer may remove a node.
1696 */
1697 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1698 ifp = igi->igi_ifp;
1699
1700 if (igi->igi_version == IGMP_VERSION_3) {
1701 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1702 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1703 PR_FASTHZ);
1704 mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
1705 mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1706 }
1707
1708 IF_ADDR_RLOCK(ifp);
1709 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1710 if (ifma->ifma_addr->sa_family != AF_INET ||
1711 ifma->ifma_protospec == NULL)
1712 continue;
1713 inm = (struct in_multi *)ifma->ifma_protospec;
1714 switch (igi->igi_version) {
1715 case IGMP_VERSION_1:
1716 case IGMP_VERSION_2:
1717 igmp_v1v2_process_group_timer(inm,
1718 igi->igi_version);
1719 break;
1720 case IGMP_VERSION_3:
1721 igmp_v3_process_group_timers(igi, &qrq,
1722 &scq, inm, uri_fasthz);
1723 break;
1724 }
1725 }
1726 IF_ADDR_RUNLOCK(ifp);
1727
1728 if (igi->igi_version == IGMP_VERSION_3) {
1729 struct in_multi *tinm;
1730
1731 igmp_dispatch_queue(&qrq, 0, loop);
1732 igmp_dispatch_queue(&scq, 0, loop);
1733
1734 /*
1735 * Free the in_multi reference(s) for this
1736 * IGMP lifecycle.
1737 */
1738 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
1739 inm_nrele, tinm) {
1740 SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
1741 inm_nrele);
1742 inm_release_locked(inm);
1743 }
1744 }
1745 }
1746
1747 out_locked:
1748 IGMP_UNLOCK();
1749 IN_MULTI_UNLOCK();
1750 }
1751
1752 /*
1753 * Update host report group timer for IGMPv1/v2.
1754 * Will update the global pending timer flags.
1755 */
1756 static void
1757 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1758 {
1759 int report_timer_expired;
1760
1761 IN_MULTI_LOCK_ASSERT();
1762 IGMP_LOCK_ASSERT();
1763
1764 if (inm->inm_timer == 0) {
1765 report_timer_expired = 0;
1766 } else if (--inm->inm_timer == 0) {
1767 report_timer_expired = 1;
1768 } else {
1769 V_current_state_timers_running = 1;
1770 return;
1771 }
1772
1773 switch (inm->inm_state) {
1774 case IGMP_NOT_MEMBER:
1775 case IGMP_SILENT_MEMBER:
1776 case IGMP_IDLE_MEMBER:
1777 case IGMP_LAZY_MEMBER:
1778 case IGMP_SLEEPING_MEMBER:
1779 case IGMP_AWAKENING_MEMBER:
1780 break;
1781 case IGMP_REPORTING_MEMBER:
1782 if (report_timer_expired) {
1783 inm->inm_state = IGMP_IDLE_MEMBER;
1784 (void)igmp_v1v2_queue_report(inm,
1785 (version == IGMP_VERSION_2) ?
1786 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1787 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1788 }
1789 break;
1790 case IGMP_G_QUERY_PENDING_MEMBER:
1791 case IGMP_SG_QUERY_PENDING_MEMBER:
1792 case IGMP_LEAVING_MEMBER:
1793 break;
1794 }
1795 }
1796
1797 /*
1798 * Update a group's timers for IGMPv3.
1799 * Will update the global pending timer flags.
1800 * Note: Unlocked read from igi.
1801 */
1802 static void
1803 igmp_v3_process_group_timers(struct igmp_ifsoftc *igi,
1804 struct mbufq *qrq, struct mbufq *scq,
1805 struct in_multi *inm, const int uri_fasthz)
1806 {
1807 int query_response_timer_expired;
1808 int state_change_retransmit_timer_expired;
1809
1810 IN_MULTI_LOCK_ASSERT();
1811 IGMP_LOCK_ASSERT();
1812
1813 query_response_timer_expired = 0;
1814 state_change_retransmit_timer_expired = 0;
1815
1816 /*
1817 * During a transition from v1/v2 compatibility mode back to v3,
1818 * a group record in REPORTING state may still have its group
1819 * timer active. This is a no-op in this function; it is easier
1820 * to deal with it here than to complicate the slow-timeout path.
1821 */
1822 if (inm->inm_timer == 0) {
1823 query_response_timer_expired = 0;
1824 } else if (--inm->inm_timer == 0) {
1825 query_response_timer_expired = 1;
1826 } else {
1827 V_current_state_timers_running = 1;
1828 }
1829
1830 if (inm->inm_sctimer == 0) {
1831 state_change_retransmit_timer_expired = 0;
1832 } else if (--inm->inm_sctimer == 0) {
1833 state_change_retransmit_timer_expired = 1;
1834 } else {
1835 V_state_change_timers_running = 1;
1836 }
1837
1838 /* We are in fasttimo, so be quick about it. */
1839 if (!state_change_retransmit_timer_expired &&
1840 !query_response_timer_expired)
1841 return;
1842
1843 switch (inm->inm_state) {
1844 case IGMP_NOT_MEMBER:
1845 case IGMP_SILENT_MEMBER:
1846 case IGMP_SLEEPING_MEMBER:
1847 case IGMP_LAZY_MEMBER:
1848 case IGMP_AWAKENING_MEMBER:
1849 case IGMP_IDLE_MEMBER:
1850 break;
1851 case IGMP_G_QUERY_PENDING_MEMBER:
1852 case IGMP_SG_QUERY_PENDING_MEMBER:
1853 /*
1854 * Respond to a previously pending Group-Specific
1855 * or Group-and-Source-Specific query by enqueueing
1856 * the appropriate Current-State report for
1857 * immediate transmission.
1858 */
1859 if (query_response_timer_expired) {
1860 int retval;
1861
1862 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1863 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1864 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1865 __func__, retval);
1866 inm->inm_state = IGMP_REPORTING_MEMBER;
1867 /* XXX Clear recorded sources for next time. */
1868 inm_clear_recorded(inm);
1869 }
1870 /* FALLTHROUGH */
1871 case IGMP_REPORTING_MEMBER:
1872 case IGMP_LEAVING_MEMBER:
1873 if (state_change_retransmit_timer_expired) {
1874 /*
1875 * State-change retransmission timer fired.
1876 * If there are any further pending retransmissions,
1877 * set the global pending state-change flag, and
1878 * reset the timer.
1879 */
1880 if (--inm->inm_scrv > 0) {
1881 inm->inm_sctimer = uri_fasthz;
1882 V_state_change_timers_running = 1;
1883 }
1884 /*
1885 * Retransmit the previously computed state-change
1886 * report. If there are no further pending
1887 * retransmissions, the mbuf queue will be consumed.
1888 * Update T0 state to T1 as we have now sent
1889 * a state-change.
1890 */
1891 (void)igmp_v3_merge_state_changes(inm, scq);
1892
1893 inm_commit(inm);
1894 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
1895 ntohl(inm->inm_addr.s_addr),
1896 inm->inm_ifp->if_xname);
1897
1898 /*
1899 * If we are leaving the group for good, make sure
1900 * we release IGMP's reference to it.
1901 * This release must be deferred using a SLIST,
1902 * as we are called from a loop which traverses
1903 * the in_ifmultiaddr TAILQ.
1904 */
1905 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1906 inm->inm_scrv == 0) {
1907 inm->inm_state = IGMP_NOT_MEMBER;
1908 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
1909 inm, inm_nrele);
1910 }
1911 }
1912 break;
1913 }
1914 }
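
/*
 * A minimal sketch (not compiled into the kernel; the helper name is
 * hypothetical): both countdowns above share one idiom, which is
 * open-coded so that each timer class can arm its own
 * V_*_timers_running flag for the next fast timeout.
 */
#if 0
static __inline int
countdown_expired(u_int *timer, int *still_running)
{

	if (*timer == 0)
		return (0);		/* timer idle; nothing fired */
	if (--(*timer) == 0)
		return (1);		/* timer expired on this tick */
	*still_running = 1;		/* keep the fast timeout armed */
	return (0);
}
#endif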
1915
1916
1917 /*
1918 * Suppress a group's pending response to a group or source/group query.
1919 *
1920 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1921 * Do NOT update ST1/ST0 as this operation merely suppresses
1922 * the currently pending group record.
1923 * Do NOT suppress the response to a general query. It is possible but
1924 * it would require adding another state or flag.
1925 */
1926 static void
1927 igmp_v3_suppress_group_record(struct in_multi *inm)
1928 {
1929
1930 IN_MULTI_LOCK_ASSERT();
1931
1932 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1933 ("%s: not IGMPv3 mode on link", __func__));
1934
1935 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1936 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1937 return;
1938
1939 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1940 inm_clear_recorded(inm);
1941
1942 inm->inm_timer = 0;
1943 inm->inm_state = IGMP_REPORTING_MEMBER;
1944 }
1945
1946 /*
1947 * Switch to a different IGMP version on the given interface,
1948 * as per Section 7.2.1.
1949 */
1950 static void
1951 igmp_set_version(struct igmp_ifsoftc *igi, const int version)
1952 {
1953 int old_version_timer;
1954
1955 IGMP_LOCK_ASSERT();
1956
1957 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1958 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1959
1960 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1961 /*
1962 * Compute the "Older Version Querier Present" timer as per
1963 * Section 8.12.
1964 */
1965 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1966 old_version_timer *= PR_SLOWHZ;
1967
1968 if (version == IGMP_VERSION_1) {
1969 igi->igi_v1_timer = old_version_timer;
1970 igi->igi_v2_timer = 0;
1971 } else if (version == IGMP_VERSION_2) {
1972 igi->igi_v1_timer = 0;
1973 igi->igi_v2_timer = old_version_timer;
1974 }
1975 }
1976
1977 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1978 if (igi->igi_version != IGMP_VERSION_2) {
1979 igi->igi_version = IGMP_VERSION_2;
1980 igmp_v3_cancel_link_timers(igi);
1981 }
1982 } else if (igi->igi_v1_timer > 0) {
1983 if (igi->igi_version != IGMP_VERSION_1) {
1984 igi->igi_version = IGMP_VERSION_1;
1985 igmp_v3_cancel_link_timers(igi);
1986 }
1987 }
1988 }
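
/*
 * Worked example (a sketch assuming the stock RFC 3376 defaults:
 * Robustness Variable 2, Query Interval 125s, Query Response
 * Interval 10s): the Older Version Querier Present timeout computed
 * above is 2 * 125 + 10 = 260 seconds, which at PR_SLOWHZ
 * (2 ticks/second) yields 520 slow timeout ticks.
 */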
1989
1990 /*
1991 * Cancel pending IGMPv3 timers for the given link and all groups
1992 * joined on it; state-change, general-query, and group-query timers.
1993 *
1994 * Only ever called on a transition from v3 to Compatibility mode. Kill
1995 * the timers stone dead (this may be expensive for a large number of
1996 * groups); they will be restarted if Compatibility Mode query
1997 * processing deems it necessary.
1998 */
1999 static void
2000 igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
2001 {
2002 struct ifmultiaddr *ifma;
2003 struct ifnet *ifp;
2004 struct in_multi *inm, *tinm;
2005
2006 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2007 igi->igi_ifp, igi->igi_ifp->if_xname);
2008
2009 IN_MULTI_LOCK_ASSERT();
2010 IGMP_LOCK_ASSERT();
2011
2012 /*
2013 * Stop the v3 General Query Response on this link stone dead.
2014 * If fasttimo is woken up due to V_interface_timers_running,
2015 * the flag will be cleared if there are no pending link timers.
2016 */
2017 igi->igi_v3_timer = 0;
2018
2019 /*
2020 * Now clear the current-state and state-change report timers
2021 * for all memberships scoped to this link.
2022 */
2023 ifp = igi->igi_ifp;
2024 IF_ADDR_RLOCK(ifp);
2025 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2026 if (ifma->ifma_addr->sa_family != AF_INET ||
2027 ifma->ifma_protospec == NULL)
2028 continue;
2029 inm = (struct in_multi *)ifma->ifma_protospec;
2030 switch (inm->inm_state) {
2031 case IGMP_NOT_MEMBER:
2032 case IGMP_SILENT_MEMBER:
2033 case IGMP_IDLE_MEMBER:
2034 case IGMP_LAZY_MEMBER:
2035 case IGMP_SLEEPING_MEMBER:
2036 case IGMP_AWAKENING_MEMBER:
2037 /*
2038 * These states are either not relevant in v3 mode,
2039 * or are unreported. Do nothing.
2040 */
2041 break;
2042 case IGMP_LEAVING_MEMBER:
2043 /*
2044 * If we are leaving the group and switching to
2045 * compatibility mode, we need to release the final
2046 * reference held for issuing the INCLUDE {}, and
2047 * transition to REPORTING to ensure the host leave
2048 * message is sent upstream to the old querier --
2049 * transition to NOT would lose the leave and race.
2050 */
2051 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2052 /* FALLTHROUGH */
2053 case IGMP_G_QUERY_PENDING_MEMBER:
2054 case IGMP_SG_QUERY_PENDING_MEMBER:
2055 inm_clear_recorded(inm);
2056 /* FALLTHROUGH */
2057 case IGMP_REPORTING_MEMBER:
2058 inm->inm_state = IGMP_REPORTING_MEMBER;
2059 break;
2060 }
2061 /*
2062 * Always clear state-change and group report timers.
2063 * Free any pending IGMPv3 state-change records.
2064 */
2065 inm->inm_sctimer = 0;
2066 inm->inm_timer = 0;
2067 mbufq_drain(&inm->inm_scq);
2068 }
2069 IF_ADDR_RUNLOCK(ifp);
2070 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
2071 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
2072 inm_release_locked(inm);
2073 }
2074 }
2075
2076 /*
2077 * Update the Older Version Querier Present timers for a link.
2078 * See Section 7.2.1 of RFC 3376.
2079 */
2080 static void
2081 igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
2082 {
2083
2084 IGMP_LOCK_ASSERT();
2085
2086 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2087 /*
2088 * IGMPv1 and IGMPv2 Querier Present timers expired.
2089 *
2090 * Revert to IGMPv3.
2091 */
2092 if (igi->igi_version != IGMP_VERSION_3) {
2093 CTR5(KTR_IGMPV3,
2094 "%s: transition from v%d -> v%d on %p(%s)",
2095 __func__, igi->igi_version, IGMP_VERSION_3,
2096 igi->igi_ifp, igi->igi_ifp->if_xname);
2097 igi->igi_version = IGMP_VERSION_3;
2098 }
2099 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2100 /*
2101 * IGMPv1 Querier Present timer expired,
2102 * IGMPv2 Querier Present timer running.
2103 * If IGMPv2 was disabled since last timeout,
2104 * revert to IGMPv3.
2105 * If IGMPv2 is enabled, revert to IGMPv2.
2106 */
2107 if (!V_igmp_v2enable) {
2108 CTR5(KTR_IGMPV3,
2109 "%s: transition from v%d -> v%d on %p(%s)",
2110 __func__, igi->igi_version, IGMP_VERSION_3,
2111 igi->igi_ifp, igi->igi_ifp->if_xname);
2112 igi->igi_v2_timer = 0;
2113 igi->igi_version = IGMP_VERSION_3;
2114 } else {
2115 --igi->igi_v2_timer;
2116 if (igi->igi_version != IGMP_VERSION_2) {
2117 CTR5(KTR_IGMPV3,
2118 "%s: transition from v%d -> v%d on %p(%s)",
2119 __func__, igi->igi_version, IGMP_VERSION_2,
2120 igi->igi_ifp, igi->igi_ifp->if_xname);
2121 igi->igi_version = IGMP_VERSION_2;
2122 igmp_v3_cancel_link_timers(igi);
2123 }
2124 }
2125 } else if (igi->igi_v1_timer > 0) {
2126 /*
2127 * IGMPv1 Querier Present timer running.
2128 * Stop IGMPv2 timer if running.
2129 *
2130 * If IGMPv1 was disabled since last timeout,
2131 * revert to IGMPv3.
2132 * If IGMPv1 is enabled, count down the IGMPv1 timer.
2133 */
2134 if (!V_igmp_v1enable) {
2135 CTR5(KTR_IGMPV3,
2136 "%s: transition from v%d -> v%d on %p(%s)",
2137 __func__, igi->igi_version, IGMP_VERSION_3,
2138 igi->igi_ifp, igi->igi_ifp->if_xname);
2139 igi->igi_v1_timer = 0;
2140 igi->igi_version = IGMP_VERSION_3;
2141 } else {
2142 --igi->igi_v1_timer;
2143 }
2144 if (igi->igi_v2_timer > 0) {
2145 CTR3(KTR_IGMPV3,
2146 "%s: cancel v2 timer on %p(%s)",
2147 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2148 igi->igi_v2_timer = 0;
2149 }
2150 }
2151 }
2152
2153 /*
2154 * Global slowtimo handler.
2155 * VIMAGE: Timeout handlers are expected to service all vimages.
2156 */
2157 void
2158 igmp_slowtimo(void)
2159 {
2160 VNET_ITERATOR_DECL(vnet_iter);
2161
2162 VNET_LIST_RLOCK_NOSLEEP();
2163 VNET_FOREACH(vnet_iter) {
2164 CURVNET_SET(vnet_iter);
2165 igmp_slowtimo_vnet();
2166 CURVNET_RESTORE();
2167 }
2168 VNET_LIST_RUNLOCK_NOSLEEP();
2169 }
2170
2171 /*
2172 * Per-vnet slowtimo handler.
2173 */
2174 static void
2175 igmp_slowtimo_vnet(void)
2176 {
2177 struct igmp_ifsoftc *igi;
2178
2179 IGMP_LOCK();
2180
2181 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2182 igmp_v1v2_process_querier_timers(igi);
2183 }
2184
2185 IGMP_UNLOCK();
2186 }
2187
2188 /*
2189 * Dispatch an IGMPv1/v2 host report or leave message.
2190 * These are always small enough to fit inside a single mbuf.
2191 */
2192 static int
2193 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2194 {
2195 struct ifnet *ifp;
2196 struct igmp *igmp;
2197 struct ip *ip;
2198 struct mbuf *m;
2199
2200 IN_MULTI_LOCK_ASSERT();
2201 IGMP_LOCK_ASSERT();
2202
2203 ifp = inm->inm_ifp;
2204
2205 m = m_gethdr(M_NOWAIT, MT_DATA);
2206 if (m == NULL)
2207 return (ENOMEM);
2208 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2209
2210 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2211
2212 m->m_data += sizeof(struct ip);
2213 m->m_len = sizeof(struct igmp);
2214
2215 igmp = mtod(m, struct igmp *);
2216 igmp->igmp_type = type;
2217 igmp->igmp_code = 0;
2218 igmp->igmp_group = inm->inm_addr;
2219 igmp->igmp_cksum = 0;
2220 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2221
2222 m->m_data -= sizeof(struct ip);
2223 m->m_len += sizeof(struct ip);
2224
2225 ip = mtod(m, struct ip *);
2226 ip->ip_tos = 0;
2227 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2228 ip->ip_off = 0;
2229 ip->ip_p = IPPROTO_IGMP;
2230 ip->ip_src.s_addr = INADDR_ANY;
2231
2232 if (type == IGMP_HOST_LEAVE_MESSAGE)
2233 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2234 else
2235 ip->ip_dst = inm->inm_addr;
2236
2237 igmp_save_context(m, ifp);
2238
2239 m->m_flags |= M_IGMPV2;
2240 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2241 m->m_flags |= M_IGMP_LOOP;
2242
2243 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2244 netisr_dispatch(NETISR_IGMP, m);
2245
2246 return (0);
2247 }
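
/*
 * Layout sketch of the datagram built above: the mbuf is sized for
 * both headers up front, m_data is advanced past the IP header while
 * the IGMP fields are filled in and checksummed, then pulled back so
 * the whole datagram is visible to the IP layer.
 *
 *	+-----------------+------------------------------+
 *	| struct ip (20B) | struct igmp (8B)             |
 *	| ip_p = IGMP     | type / code / cksum / group  |
 *	+-----------------+------------------------------+
 */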
2248
2249 /*
2250 * Process a state change from the upper layer for the given IPv4 group.
2251 *
2252 * Each socket holds a reference on the in_multi in its own ip_moptions.
2253 * The socket layer will have made the necessary updates to the group
2254 * state; it is now up to IGMP to issue a state change report if there
2255 * has been any change between T0 (when the last state-change was issued)
2256 * and T1 (now).
2257 *
2258 * We use the IGMPv3 state machine at group level. The IGMP module
2259 * however makes the decision as to which IGMP protocol version to speak.
2260 * A state change *from* INCLUDE {} always means an initial join.
2261 * A state change *to* INCLUDE {} always means a final leave.
2262 *
2263 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2264 * save ourselves a bunch of work; any exclusive mode groups need not
2265 * compute source filter lists.
2266 *
2267 * VIMAGE: curvnet should have been set by caller, as this routine
2268 * is called from the socket option handlers.
2269 */
2270 int
2271 igmp_change_state(struct in_multi *inm)
2272 {
2273 struct igmp_ifsoftc *igi;
2274 struct ifnet *ifp;
2275 int error;
2276
2277 IN_MULTI_LOCK_ASSERT();
2278
2279 error = 0;
2280
2281 /*
2282 * Try to detect if the upper layer just asked us to change state
2283 * for an interface which has now gone away.
2284 */
2285 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2286 ifp = inm->inm_ifma->ifma_ifp;
2287 /*
2288 * Sanity check that netinet's notion of ifp is the
2289 * same as net's.
2290 */
2291 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2292
2293 IGMP_LOCK();
2294
2295 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2296 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2297
2298 /*
2299 * If we detect a state transition to or from MCAST_UNDEFINED
2300 * for this group, then we are starting or finishing an IGMP
2301 * life cycle for this group.
2302 */
2303 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2304 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2305 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2306 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2307 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2308 error = igmp_initial_join(inm, igi);
2309 goto out_locked;
2310 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2311 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2312 igmp_final_leave(inm, igi);
2313 goto out_locked;
2314 }
2315 } else {
2316 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2317 }
2318
2319 error = igmp_handle_state_change(inm, igi);
2320
2321 out_locked:
2322 IGMP_UNLOCK();
2323 return (error);
2324 }
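
/*
 * Dispatch summary for igmp_change_state(), with t0 = inm_st[0] and
 * t1 = inm_st[1] (iss_fmode):
 *
 *	t0 mode		t1 mode		action
 *	UNDEFINED	IN or EX	igmp_initial_join()
 *	IN or EX	UNDEFINED	igmp_final_leave()
 *	IN or EX	EX or IN	igmp_handle_state_change()
 *	unchanged	unchanged	igmp_handle_state_change()
 */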
2325
2326 /*
2327 * Perform the initial join for an IGMP group.
2328 *
2329 * When joining a group:
2330 * If the group should have its IGMP traffic suppressed, do nothing.
2331 * IGMPv1 starts sending IGMPv1 host membership reports.
2332 * IGMPv2 starts sending IGMPv2 host membership reports.
2333 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2334 * initial state of the membership.
2335 */
2336 static int
2337 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2338 {
2339 struct ifnet *ifp;
2340 struct mbufq *mq;
2341 int error, retval, syncstates;
2342
2343 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2344 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2345
2346 error = 0;
2347 syncstates = 1;
2348
2349 ifp = inm->inm_ifp;
2350
2351 IN_MULTI_LOCK_ASSERT();
2352 IGMP_LOCK_ASSERT();
2353
2354 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2355
2356 /*
2357 * Groups joined on loopback or marked as 'not reported',
2358 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2359 * are never reported in any IGMP protocol exchanges.
2360 * All other groups enter the appropriate IGMP state machine
2361 * for the version in use on this link.
2362 * A link marked as IGIF_SILENT causes IGMP to be completely
2363 * disabled for the link.
2364 */
2365 if ((ifp->if_flags & IFF_LOOPBACK) ||
2366 (igi->igi_flags & IGIF_SILENT) ||
2367 !igmp_isgroupreported(inm->inm_addr)) {
2368 CTR1(KTR_IGMPV3,
2369 "%s: not kicking state machine for silent group", __func__);
2370 inm->inm_state = IGMP_SILENT_MEMBER;
2371 inm->inm_timer = 0;
2372 } else {
2373 /*
2374 * Deal with overlapping in_multi lifecycle.
2375 * If this group was LEAVING, then make sure
2376 * we drop the reference we picked up to keep the
2377 * group around for the final INCLUDE {} enqueue.
2378 */
2379 if (igi->igi_version == IGMP_VERSION_3 &&
2380 inm->inm_state == IGMP_LEAVING_MEMBER)
2381 inm_release_locked(inm);
2382
2383 inm->inm_state = IGMP_REPORTING_MEMBER;
2384
2385 switch (igi->igi_version) {
2386 case IGMP_VERSION_1:
2387 case IGMP_VERSION_2:
2388 inm->inm_state = IGMP_IDLE_MEMBER;
2389 error = igmp_v1v2_queue_report(inm,
2390 (igi->igi_version == IGMP_VERSION_2) ?
2391 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2392 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2393 if (error == 0) {
2394 inm->inm_timer = IGMP_RANDOM_DELAY(
2395 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2396 V_current_state_timers_running = 1;
2397 }
2398 break;
2399
2400 case IGMP_VERSION_3:
2401 /*
2402 * Defer update of T0 to T1, until the first copy
2403 * of the state change has been transmitted.
2404 */
2405 syncstates = 0;
2406
2407 /*
2408 * Immediately enqueue a State-Change Report for
2409 * this interface, freeing any previous reports.
2410 * Don't kick the timers if there is nothing to do,
2411 * or if an error occurred.
2412 */
2413 mq = &inm->inm_scq;
2414 mbufq_drain(mq);
2415 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2416 0, 0);
2417 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2418 __func__, retval);
2419 if (retval <= 0) {
2420 error = retval * -1;
2421 break;
2422 }
2423
2424 /*
2425 * Schedule transmission of pending state-change
2426 * report up to RV times for this link. The timer
2427 * will fire at the next igmp_fasttimo (~200ms),
2428 * giving us an opportunity to merge the reports.
2429 */
2430 if (igi->igi_flags & IGIF_LOOPBACK) {
2431 inm->inm_scrv = 1;
2432 } else {
2433 KASSERT(igi->igi_rv > 1,
2434 ("%s: invalid robustness %d", __func__,
2435 igi->igi_rv));
2436 inm->inm_scrv = igi->igi_rv;
2437 }
2438 inm->inm_sctimer = 1;
2439 V_state_change_timers_running = 1;
2440
2441 error = 0;
2442 break;
2443 }
2444 }
2445
2446 /*
2447 * Only update the T0 state if state change is atomic,
2448 * i.e. we don't need to wait for a timer to fire before we
2449 * can consider the state change to have been communicated.
2450 */
2451 if (syncstates) {
2452 inm_commit(inm);
2453 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2454 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2455 }
2456
2457 return (error);
2458 }
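
/*
 * For the v1/v2 arm above: the first unsolicited report goes out
 * immediately via igmp_v1v2_queue_report(); inm_timer then schedules
 * a duplicate unsolicited report at a random point within
 * IGMP_V1V2_MAX_RI (10s), i.e. up to 10 * PR_FASTHZ fast timeout
 * ticks, as RFC 2236 suggests for robustness against loss.
 */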
2459
2460 /*
2461 * Issue an intermediate state change during the IGMP life-cycle.
2462 */
2463 static int
2464 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2465 {
2466 struct ifnet *ifp;
2467 int retval;
2468
2469 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2470 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2471
2472 ifp = inm->inm_ifp;
2473
2474 IN_MULTI_LOCK_ASSERT();
2475 IGMP_LOCK_ASSERT();
2476
2477 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2478
2479 if ((ifp->if_flags & IFF_LOOPBACK) ||
2480 (igi->igi_flags & IGIF_SILENT) ||
2481 !igmp_isgroupreported(inm->inm_addr) ||
2482 (igi->igi_version != IGMP_VERSION_3)) {
2483 if (!igmp_isgroupreported(inm->inm_addr)) {
2484 CTR1(KTR_IGMPV3,
2485 "%s: not kicking state machine for silent group", __func__);
2486 }
2487 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2488 inm_commit(inm);
2489 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2490 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2491 return (0);
2492 }
2493
2494 mbufq_drain(&inm->inm_scq);
2495
2496 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2497 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2498 if (retval <= 0)
2499 return (-retval);
2500
2501 /*
2502 * If record(s) were enqueued, start the state-change
2503 * report timer for this group.
2504 */
2505 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2506 inm->inm_sctimer = 1;
2507 V_state_change_timers_running = 1;
2508
2509 return (0);
2510 }
2511
2512 /*
2513 * Perform the final leave for an IGMP group.
2514 *
2515 * When leaving a group:
2516 * IGMPv1 does nothing.
2517 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2518 * IGMPv3 enqueues a state-change report containing a transition
2519 * to INCLUDE {} for immediate transmission.
2520 */
2521 static void
2522 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2523 {
2524 int syncstates;
2525
2526 syncstates = 1;
2527
2528 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2529 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2530 inm->inm_ifp->if_xname);
2531
2532 IN_MULTI_LOCK_ASSERT();
2533 IGMP_LOCK_ASSERT();
2534
2535 switch (inm->inm_state) {
2536 case IGMP_NOT_MEMBER:
2537 case IGMP_SILENT_MEMBER:
2538 case IGMP_LEAVING_MEMBER:
2539 /* Already leaving or left; do nothing. */
2540 CTR1(KTR_IGMPV3,
2541 "%s: not kicking state machine for silent group", __func__);
2542 break;
2543 case IGMP_REPORTING_MEMBER:
2544 case IGMP_IDLE_MEMBER:
2545 case IGMP_G_QUERY_PENDING_MEMBER:
2546 case IGMP_SG_QUERY_PENDING_MEMBER:
2547 if (igi->igi_version == IGMP_VERSION_2) {
2548 #ifdef INVARIANTS
2549 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2550 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2551 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2552 __func__);
2553 #endif
2554 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2555 inm->inm_state = IGMP_NOT_MEMBER;
2556 } else if (igi->igi_version == IGMP_VERSION_3) {
2557 /*
2558 * Stop group timer and all pending reports.
2559 * Immediately enqueue a state-change report
2560 * TO_IN {} to be sent on the next fast timeout,
2561 * giving us an opportunity to merge reports.
2562 */
2563 mbufq_drain(&inm->inm_scq);
2564 inm->inm_timer = 0;
2565 if (igi->igi_flags & IGIF_LOOPBACK) {
2566 inm->inm_scrv = 1;
2567 } else {
2568 inm->inm_scrv = igi->igi_rv;
2569 }
2570 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2571 "pending retransmissions.", __func__,
2572 ntohl(inm->inm_addr.s_addr),
2573 inm->inm_ifp->if_xname, inm->inm_scrv);
2574 if (inm->inm_scrv == 0) {
2575 inm->inm_state = IGMP_NOT_MEMBER;
2576 inm->inm_sctimer = 0;
2577 } else {
2578 int retval;
2579
2580 inm_acquire_locked(inm);
2581
2582 retval = igmp_v3_enqueue_group_record(
2583 &inm->inm_scq, inm, 1, 0, 0);
2584 KASSERT(retval != 0,
2585 ("%s: enqueue record = %d", __func__,
2586 retval));
2587
2588 inm->inm_state = IGMP_LEAVING_MEMBER;
2589 inm->inm_sctimer = 1;
2590 V_state_change_timers_running = 1;
2591 syncstates = 0;
2592 }
2593 break;
2594 }
2595 break;
2596 case IGMP_LAZY_MEMBER:
2597 case IGMP_SLEEPING_MEMBER:
2598 case IGMP_AWAKENING_MEMBER:
2599 /* Our reports are suppressed; do nothing. */
2600 break;
2601 }
2602
2603 if (syncstates) {
2604 inm_commit(inm);
2605 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2606 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2607 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2608 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2609 __func__, ntohl(inm->inm_addr.s_addr),
2610 inm->inm_ifp->if_xname);
2611 }
2612 }
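
/*
 * In IGMPv3 the leave itself is just a state-change record: a
 * CHANGE_TO_INCLUDE_MODE with an empty source list (TO_IN {}),
 * retransmitted up to inm_scrv times by the fast timeout. The extra
 * reference taken above keeps the in_multi alive until the last
 * retransmission has been merged and sent.
 */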
2613
2614 /*
2615 * Enqueue an IGMPv3 group record to the given output queue.
2616 *
2617 * XXX This function could do with having the allocation code
2618 * split out, and the multiple-tree-walks coalesced into a single
2619 * routine as has been done in igmp_v3_enqueue_filter_change().
2620 *
2621 * If is_state_change is zero, a current-state record is appended.
2622 * If is_state_change is non-zero, a state-change report is appended.
2623 *
2624 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2625 * If is_group_query is zero, and if there is a packet with free space
2626 * at the tail of the queue, the record will be appended to it,
2627 * provided there is enough free space.
2628 * Otherwise a new mbuf packet chain is allocated.
2629 *
2630 * If is_source_query is non-zero, each source is checked to see if
2631 * it was recorded for a Group-Source query, and will be omitted if
2632 * it is not both in-mode and recorded.
2633 *
2634 * The function will attempt to allocate leading space in the packet
2635 * for the IP/IGMP header to be prepended without fragmenting the chain.
2636 *
2637 * If successful the size of all data appended to the queue is returned,
2638 * otherwise an error code less than zero is returned, or zero if
2639 * no record(s) were appended.
2640 */
2641 static int
2642 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2643 const int is_state_change, const int is_group_query,
2644 const int is_source_query)
2645 {
2646 struct igmp_grouprec ig;
2647 struct igmp_grouprec *pig;
2648 struct ifnet *ifp;
2649 struct ip_msource *ims, *nims;
2650 struct mbuf *m0, *m, *md;
2651 int error, is_filter_list_change;
2652 int minrec0len, m0srcs, msrcs, nbytes, off;
2653 int record_has_sources;
2654 int now;
2655 int type;
2656 in_addr_t naddr;
2657 uint8_t mode;
2658
2659 IN_MULTI_LOCK_ASSERT();
2660
2661 error = 0;
2662 ifp = inm->inm_ifp;
2663 is_filter_list_change = 0;
2664 m = NULL;
2665 m0 = NULL;
2666 m0srcs = 0;
2667 msrcs = 0;
2668 nbytes = 0;
2669 nims = NULL;
2670 record_has_sources = 1;
2671 pig = NULL;
2672 type = IGMP_DO_NOTHING;
2673 mode = inm->inm_st[1].iss_fmode;
2674
2675 /*
2676 * If we did not transition out of ASM mode during t0->t1,
2677 * and there are no source nodes to process, we can skip
2678 * the generation of source records.
2679 */
2680 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2681 inm->inm_nsrc == 0)
2682 record_has_sources = 0;
2683
2684 if (is_state_change) {
2685 /*
2686 * Queue a state change record.
2687 * If the mode did not change, and there are non-ASM
2688 * listeners or source filters present,
2689 * we potentially need to issue two records for the group.
2690 * If we are transitioning to MCAST_UNDEFINED, we need
2691 * not send any sources.
2692 * If there are ASM listeners, and there was no filter
2693 * mode transition of any kind, do nothing.
2694 */
2695 if (mode != inm->inm_st[0].iss_fmode) {
2696 if (mode == MCAST_EXCLUDE) {
2697 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2698 __func__);
2699 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2700 } else {
2701 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2702 __func__);
2703 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2704 if (mode == MCAST_UNDEFINED)
2705 record_has_sources = 0;
2706 }
2707 } else {
2708 if (record_has_sources) {
2709 is_filter_list_change = 1;
2710 } else {
2711 type = IGMP_DO_NOTHING;
2712 }
2713 }
2714 } else {
2715 /*
2716 * Queue a current state record.
2717 */
2718 if (mode == MCAST_EXCLUDE) {
2719 type = IGMP_MODE_IS_EXCLUDE;
2720 } else if (mode == MCAST_INCLUDE) {
2721 type = IGMP_MODE_IS_INCLUDE;
2722 KASSERT(inm->inm_st[1].iss_asm == 0,
2723 ("%s: inm %p is INCLUDE but ASM count is %d",
2724 __func__, inm, inm->inm_st[1].iss_asm));
2725 }
2726 }
2727
2728 /*
2729 * Generate the filter list changes using a separate function.
2730 */
2731 if (is_filter_list_change)
2732 return (igmp_v3_enqueue_filter_change(mq, inm));
2733
2734 if (type == IGMP_DO_NOTHING) {
2735 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2736 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2737 return (0);
2738 }
2739
2740 /*
2741 * If any sources are present, we must be able to fit at least
2742 * one in the trailing space of the tail packet's mbuf,
2743 * ideally more.
2744 */
2745 minrec0len = sizeof(struct igmp_grouprec);
2746 if (record_has_sources)
2747 minrec0len += sizeof(in_addr_t);
2748
2749 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2750 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2751 inm->inm_ifp->if_xname);
2752
2753 /*
2754 * Check if we have a packet in the tail of the queue for this
2755 * group into which the first group record for this group will fit.
2756 * Otherwise allocate a new packet.
2757 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2758 * Note: Group records for G/GSR query responses MUST be sent
2759 * in their own packet.
2760 */
2761 m0 = mbufq_last(mq);
2762 if (!is_group_query &&
2763 m0 != NULL &&
2764 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2765 (m0->m_pkthdr.len + minrec0len) <
2766 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2767 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2768 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2769 m = m0;
2770 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2771 } else {
2772 if (mbufq_full(mq)) {
2773 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2774 return (-ENOMEM);
2775 }
2776 m = NULL;
2777 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2778 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2779 if (!is_state_change && !is_group_query) {
2780 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2781 if (m)
2782 m->m_data += IGMP_LEADINGSPACE;
2783 }
2784 if (m == NULL) {
2785 m = m_gethdr(M_NOWAIT, MT_DATA);
2786 if (m)
2787 M_ALIGN(m, IGMP_LEADINGSPACE);
2788 }
2789 if (m == NULL)
2790 return (-ENOMEM);
2791
2792 igmp_save_context(m, ifp);
2793
2794 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2795 }
2796
2797 /*
2798 * Append group record.
2799 * If we have sources, we don't know how many yet.
2800 */
2801 ig.ig_type = type;
2802 ig.ig_datalen = 0;
2803 ig.ig_numsrc = 0;
2804 ig.ig_group = inm->inm_addr;
2805 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2806 if (m != m0)
2807 m_freem(m);
2808 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2809 return (-ENOMEM);
2810 }
2811 nbytes += sizeof(struct igmp_grouprec);
2812
2813 /*
2814 * Append as many sources as will fit in the first packet.
2815 * If we are appending to a new packet, the chain allocation
2816 * may potentially use clusters; use m_getptr() in this case.
2817 * If we are appending to an existing packet, we need to obtain
2818 * a pointer to the group record after m_append(), in case a new
2819 * mbuf was allocated.
2820 * Only append sources which are in-mode at t1. If we are
2821 * transitioning to MCAST_UNDEFINED state on the group, do not
2822 * include source entries.
2823 * Only report recorded sources in our filter set when responding
2824 * to a group-source query.
2825 */
2826 if (record_has_sources) {
2827 if (m == m0) {
2828 md = m_last(m);
2829 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2830 md->m_len - nbytes);
2831 } else {
2832 md = m_getptr(m, 0, &off);
2833 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2834 off);
2835 }
2836 msrcs = 0;
2837 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2838 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2839 ims->ims_haddr);
2840 now = ims_get_mode(inm, ims, 1);
2841 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2842 if ((now != mode) ||
2843 (now == mode && mode == MCAST_UNDEFINED)) {
2844 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2845 continue;
2846 }
2847 if (is_source_query && ims->ims_stp == 0) {
2848 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2849 __func__);
2850 continue;
2851 }
2852 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2853 naddr = htonl(ims->ims_haddr);
2854 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2855 if (m != m0)
2856 m_freem(m);
2857 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2858 __func__);
2859 return (-ENOMEM);
2860 }
2861 nbytes += sizeof(in_addr_t);
2862 ++msrcs;
2863 if (msrcs == m0srcs)
2864 break;
2865 }
2866 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2867 msrcs);
2868 pig->ig_numsrc = htons(msrcs);
2869 nbytes += (msrcs * sizeof(in_addr_t));
2870 }
2871
2872 if (is_source_query && msrcs == 0) {
2873 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2874 if (m != m0)
2875 m_freem(m);
2876 return (0);
2877 }
2878
2879 /*
2880 * We are good to go with first packet.
2881 */
2882 if (m != m0) {
2883 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2884 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2885 mbufq_enqueue(mq, m);
2886 } else
2887 m->m_pkthdr.PH_vt.vt_nrecs++;
2888
2889 /*
2890 * No further work needed if no source list in packet(s).
2891 */
2892 if (!record_has_sources)
2893 return (nbytes);
2894
2895 /*
2896 * Whilst sources remain to be announced, we need to allocate
2897 * a new packet and fill out as many sources as will fit.
2898 * Always try for a cluster first.
2899 */
2900 while (nims != NULL) {
2901 if (mbufq_full(mq)) {
2902 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2903 return (-ENOMEM);
2904 }
2905 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2906 if (m)
2907 m->m_data += IGMP_LEADINGSPACE;
2908 if (m == NULL) {
2909 m = m_gethdr(M_NOWAIT, MT_DATA);
2910 if (m)
2911 M_ALIGN(m, IGMP_LEADINGSPACE);
2912 }
2913 if (m == NULL)
2914 return (-ENOMEM);
2915 igmp_save_context(m, ifp);
2916 md = m_getptr(m, 0, &off);
2917 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2918 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2919
2920 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2921 if (m != m0)
2922 m_freem(m);
2923 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2924 return (-ENOMEM);
2925 }
2926 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2927 nbytes += sizeof(struct igmp_grouprec);
2928
2929 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2930 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2931
2932 msrcs = 0;
2933 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2934 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2935 ims->ims_haddr);
2936 now = ims_get_mode(inm, ims, 1);
2937 if ((now != mode) ||
2938 (now == mode && mode == MCAST_UNDEFINED)) {
2939 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2940 continue;
2941 }
2942 if (is_source_query && ims->ims_stp == 0) {
2943 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2944 __func__);
2945 continue;
2946 }
2947 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2948 naddr = htonl(ims->ims_haddr);
2949 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2950 if (m != m0)
2951 m_freem(m);
2952 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2953 __func__);
2954 return (-ENOMEM);
2955 }
2956 ++msrcs;
2957 if (msrcs == m0srcs)
2958 break;
2959 }
2960 pig->ig_numsrc = htons(msrcs);
2961 nbytes += (msrcs * sizeof(in_addr_t));
2962
2963 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2964 mbufq_enqueue(mq, m);
2965 }
2966
2967 return (nbytes);
2968 }
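
/*
 * For reference, the argument patterns used by the callers in this
 * file are (is_state_change, is_group_query, is_source_query):
 *
 *	(1, 0, 0)	join/leave/filter change onto inm_scq
 *	(0, 1, 0)	pending group-specific query response
 *	(0, 1, 1)	pending group-and-source-specific query response
 *	(0, 0, 0)	general query response into igi_gq
 */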
2969
2970 /*
2971 * Type used to mark record pass completion.
2972 * We exploit the fact that we can cast to this easily from the
2973 * current filter modes on each ip_msource node.
2974 */
2975 typedef enum {
2976 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2977 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2978 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2979 REC_FULL = REC_ALLOW | REC_BLOCK
2980 } rectype_t;
2981
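/*
 * For example, MCAST_INCLUDE (1) casts to REC_ALLOW and MCAST_EXCLUDE
 * (2) casts to REC_BLOCK. A source whose mode became UNDEFINED at t1
 * is reported under the inverse of the group filter mode, computed
 * below as nrt = (rectype_t)(~mode & REC_FULL); e.g. for an EXCLUDE
 * group, ~2 & 3 == 1, i.e. REC_ALLOW (the source is no longer
 * blocked).
 */
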
2982 /*
2983 * Enqueue an IGMPv3 filter list change to the given output queue.
2984 *
2985 * Source list filter state is held in an RB-tree. When the filter list
2986 * for a group is changed without changing its mode, we need to compute
2987 * the deltas between T0 and T1 for each source in the filter set,
2988 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2989 *
2990 * As we may potentially queue two record types, and the entire RB-tree
2991 * needs to be walked at once, we break this out into its own function
2992 * so we can generate a tightly packed queue of packets.
2993 *
2994 * XXX This could be written to only use one tree walk, although that makes
2995 * serializing into the mbuf chains a bit harder. For now we do two walks
2996 * which makes things easier on us, and it may or may not be harder on
2997 * the L2 cache.
2998 *
2999 * If successful the size of all data appended to the queue is returned,
3000 * otherwise an error code less than zero is returned, or zero if
3001 * no record(s) were appended.
3002 */
3003 static int
3004 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
3005 {
3006 static const int MINRECLEN =
3007 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3008 struct ifnet *ifp;
3009 struct igmp_grouprec ig;
3010 struct igmp_grouprec *pig;
3011 struct ip_msource *ims, *nims;
3012 struct mbuf *m, *m0, *md;
3013 in_addr_t naddr;
3014 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3015 int nallow, nblock;
3016 uint8_t mode, now, then;
3017 rectype_t crt, drt, nrt;
3018
3019 IN_MULTI_LOCK_ASSERT();
3020
3021 if (inm->inm_nsrc == 0 ||
3022 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3023 return (0);
3024
3025 ifp = inm->inm_ifp; /* interface */
3026 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3027 crt = REC_NONE; /* current group record type */
3028 drt = REC_NONE; /* mask of completed group record types */
3029 nrt = REC_NONE; /* record type for current node */
3030 m0srcs = 0; /* # sources which will fit in current mbuf chain */
3031 nbytes = 0; /* # of bytes appended to group's state-change queue */
3032 npbytes = 0; /* # of bytes appended this packet */
3033 rsrcs = 0; /* # sources encoded in current record */
3034 schanged = 0; /* # nodes encoded in overall filter change */
3035 nallow = 0; /* # of source entries in ALLOW_NEW */
3036 nblock = 0; /* # of source entries in BLOCK_OLD */
3037 nims = NULL; /* next tree node pointer */
3038
3039 /*
3040 * For each possible filter record mode.
3041 * The first kind of source we encounter tells us which
3042 * is the first kind of record we start appending.
3043 * If a node transitioned to UNDEFINED at t1, its mode is treated
3044 * as the inverse of the group's filter mode.
3045 */
3046 while (drt != REC_FULL) {
3047 do {
3048 m0 = mbufq_last(mq);
3049 if (m0 != NULL &&
3050 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3051 IGMP_V3_REPORT_MAXRECS) &&
3052 (m0->m_pkthdr.len + MINRECLEN) <
3053 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3054 m = m0;
3055 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3056 sizeof(struct igmp_grouprec)) /
3057 sizeof(in_addr_t);
3058 CTR1(KTR_IGMPV3,
3059 "%s: use previous packet", __func__);
3060 } else {
3061 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3062 if (m)
3063 m->m_data += IGMP_LEADINGSPACE;
3064 if (m == NULL) {
3065 m = m_gethdr(M_NOWAIT, MT_DATA);
3066 if (m)
3067 M_ALIGN(m, IGMP_LEADINGSPACE);
3068 }
3069 if (m == NULL) {
3070 CTR1(KTR_IGMPV3,
3071 "%s: m_get*() failed", __func__);
3072 return (-ENOMEM);
3073 }
3074 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3075 igmp_save_context(m, ifp);
3076 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3077 sizeof(struct igmp_grouprec)) /
3078 sizeof(in_addr_t);
3079 npbytes = 0;
3080 CTR1(KTR_IGMPV3,
3081 "%s: allocated new packet", __func__);
3082 }
3083 /*
3084 * Append the IGMP group record header to the
3085 * current packet's data area.
3086 * Recalculate pointer to free space for next
3087 * group record, in case m_append() allocated
3088 * a new mbuf or cluster.
3089 */
3090 memset(&ig, 0, sizeof(ig));
3091 ig.ig_group = inm->inm_addr;
3092 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3093 if (m != m0)
3094 m_freem(m);
3095 CTR1(KTR_IGMPV3,
3096 "%s: m_append() failed", __func__);
3097 return (-ENOMEM);
3098 }
3099 npbytes += sizeof(struct igmp_grouprec);
3100 if (m != m0) {
3101 /* new packet; offset in chain */
3102 md = m_getptr(m, npbytes -
3103 sizeof(struct igmp_grouprec), &off);
3104 pig = (struct igmp_grouprec *)(mtod(md,
3105 uint8_t *) + off);
3106 } else {
3107 /* current packet; offset from last append */
3108 md = m_last(m);
3109 pig = (struct igmp_grouprec *)(mtod(md,
3110 uint8_t *) + md->m_len -
3111 sizeof(struct igmp_grouprec));
3112 }
3113 /*
3114 * Begin walking the tree for this record type
3115 * pass, or continue from where we left off
3116 * previously if we had to allocate a new packet.
3117 * Only report deltas in-mode at t1.
3118 * We need not report included sources as allowed
3119 * if we are in inclusive mode on the group,
3120 * however the converse is not true.
3121 */
3122 rsrcs = 0;
3123 if (nims == NULL)
3124 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3125 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3126 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3127 __func__, ims->ims_haddr);
3128 now = ims_get_mode(inm, ims, 1);
3129 then = ims_get_mode(inm, ims, 0);
3130 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3131 __func__, then, now);
3132 if (now == then) {
3133 CTR1(KTR_IGMPV3,
3134 "%s: skip unchanged", __func__);
3135 continue;
3136 }
3137 if (mode == MCAST_EXCLUDE &&
3138 now == MCAST_INCLUDE) {
3139 CTR1(KTR_IGMPV3,
3140 "%s: skip IN src on EX group",
3141 __func__);
3142 continue;
3143 }
3144 nrt = (rectype_t)now;
3145 if (nrt == REC_NONE)
3146 nrt = (rectype_t)(~mode & REC_FULL);
3147 if (schanged++ == 0) {
3148 crt = nrt;
3149 } else if (crt != nrt)
3150 continue;
3151 naddr = htonl(ims->ims_haddr);
3152 if (!m_append(m, sizeof(in_addr_t),
3153 (void *)&naddr)) {
3154 if (m != m0)
3155 m_freem(m);
3156 CTR1(KTR_IGMPV3,
3157 "%s: m_append() failed", __func__);
3158 return (-ENOMEM);
3159 }
3160 nallow += !!(crt == REC_ALLOW);
3161 nblock += !!(crt == REC_BLOCK);
3162 if (++rsrcs == m0srcs)
3163 break;
3164 }
3165 /*
3166 * If we did not append any tree nodes on this
3167 * pass, back out of allocations.
3168 */
3169 if (rsrcs == 0) {
3170 npbytes -= sizeof(struct igmp_grouprec);
3171 if (m != m0) {
3172 CTR1(KTR_IGMPV3,
3173 "%s: m_free(m)", __func__);
3174 m_freem(m);
3175 } else {
3176 CTR1(KTR_IGMPV3,
3177 "%s: m_adj(m, -ig)", __func__);
3178 m_adj(m, -((int)sizeof(
3179 struct igmp_grouprec)));
3180 }
3181 continue;
3182 }
3183 npbytes += (rsrcs * sizeof(in_addr_t));
3184 if (crt == REC_ALLOW)
3185 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3186 else if (crt == REC_BLOCK)
3187 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3188 pig->ig_numsrc = htons(rsrcs);
3189 /*
3190 * Count the new group record, and enqueue this
3191 * packet if it wasn't already queued.
3192 */
3193 m->m_pkthdr.PH_vt.vt_nrecs++;
3194 if (m != m0)
3195 mbufq_enqueue(mq, m);
3196 nbytes += npbytes;
3197 } while (nims != NULL);
3198 drt |= crt;
3199 crt = (~crt & REC_FULL);
3200 }
3201
3202 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3203 nallow, nblock);
3204
3205 return (nbytes);
3206 }
3207
3208 static int
3209 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3210 {
3211 struct mbufq *gq;
3212 struct mbuf *m; /* pending state-change */
3213 struct mbuf *m0; /* copy of pending state-change */
3214 struct mbuf *mt; /* last state-change in packet */
3215 int docopy, domerge;
3216 u_int recslen;
3217
3218 docopy = 0;
3219 domerge = 0;
3220 recslen = 0;
3221
3222 IN_MULTI_LOCK_ASSERT();
3223 IGMP_LOCK_ASSERT();
3224
3225 /*
3226 * If there are further pending retransmissions, make a writable
3227 * copy of each queued state-change message before merging.
3228 */
3229 if (inm->inm_scrv > 0)
3230 docopy = 1;
3231
3232 gq = &inm->inm_scq;
3233 #ifdef KTR
3234 if (mbufq_first(gq) == NULL) {
3235 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3236 __func__, inm);
3237 }
3238 #endif
3239
3240 m = mbufq_first(gq);
3241 while (m != NULL) {
3242 /*
3243 * Only merge the report into the current packet if
3244 * there is sufficient space to do so; an IGMPv3 report
3245 * packet may only contain 65,535 group records.
3246 * Always use a simple mbuf chain concatenation to do this,
3247 * as large state changes for single groups may have
3248 * allocated clusters.
3249 */
3250 domerge = 0;
3251 mt = mbufq_last(scq);
3252 if (mt != NULL) {
3253 recslen = m_length(m, NULL);
3254
3255 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3256 m->m_pkthdr.PH_vt.vt_nrecs <=
3257 IGMP_V3_REPORT_MAXRECS) &&
3258 (mt->m_pkthdr.len + recslen <=
3259 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3260 domerge = 1;
3261 }
3262
3263 if (!domerge && mbufq_full(gq)) {
3264 CTR2(KTR_IGMPV3,
3265 "%s: outbound queue full, skipping whole packet %p",
3266 __func__, m);
3267 mt = m->m_nextpkt;
3268 if (!docopy)
3269 m_freem(m);
3270 m = mt;
3271 continue;
3272 }
3273
3274 if (!docopy) {
3275 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3276 m0 = mbufq_dequeue(gq);
3277 m = m0->m_nextpkt;
3278 } else {
3279 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3280 m0 = m_dup(m, M_NOWAIT);
3281 if (m0 == NULL)
3282 return (ENOMEM);
3283 m0->m_nextpkt = NULL;
3284 m = m->m_nextpkt;
3285 }
3286
3287 if (!domerge) {
3288 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p",
3289 __func__, m0, scq);
3290 mbufq_enqueue(scq, m0);
3291 } else {
3292 struct mbuf *mtl; /* last mbuf of packet mt */
3293
3294 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p",
3295 __func__, m0, mt);
3296
3297 mtl = m_last(mt);
3298 m0->m_flags &= ~M_PKTHDR;
3299 mt->m_pkthdr.len += recslen;
3300 mt->m_pkthdr.PH_vt.vt_nrecs +=
3301 m0->m_pkthdr.PH_vt.vt_nrecs;
3302
3303 mtl->m_next = m0;
3304 }
3305 }
3306
3307 return (0);
3308 }
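
/*
 * To summarize the merge policy above: docopy is decided once per
 * call (pending retransmissions mean the original stays on inm_scq
 * and a writable duplicate is sent), while domerge is recomputed per
 * packet (record count and MTU headroom in the tail packet of scq
 * decide whether records are chained on or a new packet is queued).
 */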
3309
3310 /*
3311 * Respond to a pending IGMPv3 General Query.
3312 */
3313 static void
3314 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3315 {
3316 struct ifmultiaddr *ifma;
3317 struct ifnet *ifp;
3318 struct in_multi *inm;
3319 int retval, loop;
3320
3321 IN_MULTI_LOCK_ASSERT();
3322 IGMP_LOCK_ASSERT();
3323
3324 KASSERT(igi->igi_version == IGMP_VERSION_3,
3325 ("%s: called when version %d", __func__, igi->igi_version));
3326
3327 /*
3328 * Check that there are some packets queued. If so, send them first.
3329 * For a large number of groups the reply to a general query can take
3330 * many packets; we should finish sending them before starting to
3331 * queue the new reply.
3332 */
3333 if (mbufq_len(&igi->igi_gq) != 0)
3334 goto send;
3335
3336 ifp = igi->igi_ifp;
3337
3338 IF_ADDR_RLOCK(ifp);
3339 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3340 if (ifma->ifma_addr->sa_family != AF_INET ||
3341 ifma->ifma_protospec == NULL)
3342 continue;
3343
3344 inm = (struct in_multi *)ifma->ifma_protospec;
3345 KASSERT(ifp == inm->inm_ifp,
3346 ("%s: inconsistent ifp", __func__));
3347
3348 switch (inm->inm_state) {
3349 case IGMP_NOT_MEMBER:
3350 case IGMP_SILENT_MEMBER:
3351 break;
3352 case IGMP_REPORTING_MEMBER:
3353 case IGMP_IDLE_MEMBER:
3354 case IGMP_LAZY_MEMBER:
3355 case IGMP_SLEEPING_MEMBER:
3356 case IGMP_AWAKENING_MEMBER:
3357 inm->inm_state = IGMP_REPORTING_MEMBER;
3358 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3359 inm, 0, 0, 0);
3360 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3361 __func__, retval);
3362 break;
3363 case IGMP_G_QUERY_PENDING_MEMBER:
3364 case IGMP_SG_QUERY_PENDING_MEMBER:
3365 case IGMP_LEAVING_MEMBER:
3366 break;
3367 }
3368 }
3369 IF_ADDR_RUNLOCK(ifp);
3370
3371 send:
3372 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3373 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3374
3375 /*
3376 * Slew transmission of bursts over 500ms intervals.
3377 */
3378 if (mbufq_first(&igi->igi_gq) != NULL) {
3379 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3380 IGMP_RESPONSE_BURST_INTERVAL);
3381 V_interface_timers_running = 1;
3382 }
3383 }
3384
3385 /*
3386 * Transmit the next pending IGMP message in the output queue.
3387 *
3388 * We get called from netisr_processqueue(). A mutex private to igmpoq
3389 * will be acquired and released around this routine.
3390 *
3391 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3392 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3393 * a link and uses a link-scope multicast address.
3394 */
3395 static void
3396 igmp_intr(struct mbuf *m)
3397 {
3398 struct ip_moptions imo;
3399 struct ifnet *ifp;
3400 struct mbuf *ipopts, *m0;
3401 int error;
3402 uint32_t ifindex;
3403
3404 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3405
3406 /*
3407 * Set VNET image pointer from enqueued mbuf chain
3408 * before doing anything else. Whilst we use interface
3409 * indexes to guard against interface detach, they are
3410 * unique to each VIMAGE and must be retrieved.
3411 */
3412 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3413 ifindex = igmp_restore_context(m);
3414
3415 /*
3416 * Check if the ifnet still exists. This limits the scope of
3417 * any race in the absence of a global ifp lock for low cost
3418 * (an array lookup).
3419 */
3420 ifp = ifnet_byindex(ifindex);
3421 if (ifp == NULL) {
3422 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3423 __func__, m, ifindex);
3424 m_freem(m);
3425 IPSTAT_INC(ips_noroute);
3426 goto out;
3427 }
3428
3429 ipopts = V_igmp_sendra ? m_raopt : NULL;
3430
3431 imo.imo_multicast_ttl = 1;
3432 imo.imo_multicast_vif = -1;
3433 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3434
3435 /*
3436 * If the user requested that IGMP traffic be explicitly
3437 * redirected to the loopback interface (e.g. they are running a
3438 * MANET interface and the routing protocol needs to see the
3439 * updates), handle this now.
3440 */
3441 if (m->m_flags & M_IGMP_LOOP)
3442 imo.imo_multicast_ifp = V_loif;
3443 else
3444 imo.imo_multicast_ifp = ifp;
3445
3446 if (m->m_flags & M_IGMPV2) {
3447 m0 = m;
3448 } else {
3449 m0 = igmp_v3_encap_report(ifp, m);
3450 if (m0 == NULL) {
3451 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3452 m_freem(m);
3453 IPSTAT_INC(ips_odropped);
3454 goto out;
3455 }
3456 }
3457
3458 igmp_scrub_context(m0);
3459 m_clrprotoflags(m);
3460 m0->m_pkthdr.rcvif = V_loif;
3461 #ifdef MAC
3462 mac_netinet_igmp_send(ifp, m0);
3463 #endif
3464 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3465 if (error) {
3466 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3467 goto out;
3468 }
3469
3470 IGMPSTAT_INC(igps_snd_reports);
3471
3472 out:
3473 /*
3474 * We must restore the existing vnet pointer before
3475 * continuing as we are run from netisr context.
3476 */
3477 CURVNET_RESTORE();
3478 }
3479
3480 /*
3481 * Encapsulate an IGMPv3 report.
3482 *
3483 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3484 * chain has already had its IP/IGMPv3 header prepended. In this case
3485 * the function will not attempt to prepend; the lengths and checksums
3486 * will however be re-computed.
3487 *
3488 * Returns a pointer to the new mbuf chain head, or NULL if the
3489 * allocation failed.
3490 */
3491 static struct mbuf *
3492 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3493 {
3494 struct rm_priotracker in_ifa_tracker;
3495 struct igmp_report *igmp;
3496 struct ip *ip;
3497 int hdrlen, igmpreclen;
3498
3499 KASSERT((m->m_flags & M_PKTHDR),
3500 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3501
3502 igmpreclen = m_length(m, NULL);
3503 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3504
3505 if (m->m_flags & M_IGMPV3_HDR) {
3506 igmpreclen -= hdrlen;
3507 } else {
3508 M_PREPEND(m, hdrlen, M_NOWAIT);
3509 if (m == NULL)
3510 return (NULL);
3511 m->m_flags |= M_IGMPV3_HDR;
3512 }
3513
3514 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3515
3516 m->m_data += sizeof(struct ip);
3517 m->m_len -= sizeof(struct ip);
3518
3519 igmp = mtod(m, struct igmp_report *);
3520 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3521 igmp->ir_rsv1 = 0;
3522 igmp->ir_rsv2 = 0;
3523 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3524 igmp->ir_cksum = 0;
3525 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3526 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3527
3528 m->m_data -= sizeof(struct ip);
3529 m->m_len += sizeof(struct ip);
3530
3531 ip = mtod(m, struct ip *);
3532 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3533 ip->ip_len = htons(hdrlen + igmpreclen);
3534 ip->ip_off = htons(IP_DF);
3535 ip->ip_p = IPPROTO_IGMP;
3536 ip->ip_sum = 0;
3537
3538 ip->ip_src.s_addr = INADDR_ANY;
3539
3540 if (m->m_flags & M_IGMP_LOOP) {
3541 struct in_ifaddr *ia;
3542
3543 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
3544 if (ia != NULL) {
3545 ip->ip_src = ia->ia_addr.sin_addr;
3546 ifa_free(&ia->ia_ifa);
3547 }
3548 }
3549
3550 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3551
3552 return (m);
3553 }
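
/*
 * Resulting IGMPv3 report layout (lengths in bytes; the group records
 * were appended earlier by the enqueue functions, and the Router
 * Alert option is supplied separately at ip_output() time):
 *
 *	+-----------+----------------------+-----------------------+
 *	| ip (28-8) | igmp_report (8)      | igmp_grouprec ...     |
 *	| DF, prec  | type 0x22, numgrps   | ir_numgrps records    |
 *	+-----------+----------------------+-----------------------+
 */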
3554
3555 #ifdef KTR
3556 static char *
3557 igmp_rec_type_to_str(const int type)
3558 {
3559
3560 switch (type) {
3561 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3562 return "TO_EX";
3563 break;
3564 case IGMP_CHANGE_TO_INCLUDE_MODE:
3565 return "TO_IN";
3566 break;
3567 case IGMP_MODE_IS_EXCLUDE:
3568 return "MODE_EX";
3569 break;
3570 case IGMP_MODE_IS_INCLUDE:
3571 return "MODE_IN";
3572 break;
3573 case IGMP_ALLOW_NEW_SOURCES:
3574 return "ALLOW_NEW";
3575 break;
3576 case IGMP_BLOCK_OLD_SOURCES:
3577 return "BLOCK_OLD";
3578 break;
3579 default:
3580 break;
3581 }
3582 return "unknown";
3583 }
3584 #endif
3585
3586 #ifdef VIMAGE
3587 static void
3588 vnet_igmp_init(const void *unused __unused)
3589 {
3590
3591 netisr_register_vnet(&igmp_nh);
3592 }
3593 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3594 vnet_igmp_init, NULL);
3595
3596 static void
3597 vnet_igmp_uninit(const void *unused __unused)
3598 {
3599
3600 /* This can happen when we shutdown the entire network stack. */
3601 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3602
3603 netisr_unregister_vnet(&igmp_nh);
3604 }
3605 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3606 vnet_igmp_uninit, NULL);
3607 #endif
3608
3609 #ifdef DDB
3610 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3611 {
3612 struct igmp_ifsoftc *igi, *tigi;
3613 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3614
3615 if (!have_addr) {
3616 db_printf("usage: show igi_list <addr>\n");
3617 return;
3618 }
3619 igi_head = (struct _igi_list *)addr;
3620
3621 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3622 db_printf("igmp_ifsoftc %p:\n", igi);
3623 db_printf(" ifp %p\n", igi->igi_ifp);
3624 db_printf(" version %u\n", igi->igi_version);
3625 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3626 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3627 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3628 db_printf(" flags %#x\n", igi->igi_flags);
3629 db_printf(" rv %u\n", igi->igi_rv);
3630 db_printf(" qi %u\n", igi->igi_qi);
3631 db_printf(" qri %u\n", igi->igi_qri);
3632 db_printf(" uri %u\n", igi->igi_uri);
3633 /* SLIST_HEAD(,in_multi) igi_relinmhead */
3634 /* struct mbufq igi_gq; */
3635 db_printf("\n");
3636 }
3637 }
3638 #endif
3639
3640 static int
3641 igmp_modevent(module_t mod, int type, void *unused __unused)
3642 {
3643
3644 switch (type) {
3645 case MOD_LOAD:
3646 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3647 IGMP_LOCK_INIT();
3648 m_raopt = igmp_ra_alloc();
3649 netisr_register(&igmp_nh);
3650 break;
3651 case MOD_UNLOAD:
3652 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3653 netisr_unregister(&igmp_nh);
3654 m_free(m_raopt);
3655 m_raopt = NULL;
3656 IGMP_LOCK_DESTROY();
3657 break;
3658 default:
3659 return (EOPNOTSUPP);
3660 }
3661 return (0);
3662 }
3663
3664 static moduledata_t igmp_mod = {
3665 "igmp",
3666 igmp_modevent,
3667 0
3668 };
3669 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);