FreeBSD/Linux Kernel Cross Reference
sys/netinet/igmp.c
1 /*-
2 * Copyright (c) 2007-2009 Bruce Simpson.
3 * Copyright (c) 1988 Stephen Deering.
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Stephen Deering of Stanford University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
35 */
36
37 /*
38 * Internet Group Management Protocol (IGMP) routines.
39 * [RFC1112, RFC2236, RFC3376]
40 *
41 * Written by Steve Deering, Stanford, May 1988.
42 * Modified by Rosen Sharma, Stanford, Aug 1994.
43 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
46 *
47 * MULTICAST Revision: 3.5.1.4
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/9.1/sys/netinet/igmp.c 233200 2012-03-19 20:49:16Z jhb $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/module.h>
56 #include <sys/malloc.h>
57 #include <sys/mbuf.h>
58 #include <sys/socket.h>
59 #include <sys/protosw.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/ktr.h>
63 #include <sys/condvar.h>
64
65 #include <net/if.h>
66 #include <net/netisr.h>
67 #include <net/vnet.h>
68
69 #include <netinet/in.h>
70 #include <netinet/in_var.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip_var.h>
74 #include <netinet/ip_options.h>
75 #include <netinet/igmp.h>
76 #include <netinet/igmp_var.h>
77
78 #include <machine/in_cksum.h>
79
80 #include <security/mac/mac_framework.h>
81
82 #ifndef KTR_IGMPV3
83 #define KTR_IGMPV3 KTR_INET
84 #endif
85
86 static struct igmp_ifinfo *
87 igi_alloc_locked(struct ifnet *);
88 static void igi_delete_locked(const struct ifnet *);
89 static void igmp_dispatch_queue(struct ifqueue *, int, const int);
90 static void igmp_fasttimo_vnet(void);
91 static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
92 static int igmp_handle_state_change(struct in_multi *,
93 struct igmp_ifinfo *);
94 static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *);
95 static int igmp_input_v1_query(struct ifnet *, const struct ip *,
96 const struct igmp *);
97 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
98 const struct igmp *);
99 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
100 /*const*/ struct igmpv3 *);
101 static int igmp_input_v3_group_query(struct in_multi *,
102 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *);
103 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
104 /*const*/ struct igmp *);
105 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
106 /*const*/ struct igmp *);
107 static void igmp_intr(struct mbuf *);
108 static int igmp_isgroupreported(const struct in_addr);
109 static struct mbuf *
110 igmp_ra_alloc(void);
111 #ifdef KTR
112 static char * igmp_rec_type_to_str(const int);
113 #endif
114 static void igmp_set_version(struct igmp_ifinfo *, const int);
115 static void igmp_slowtimo_vnet(void);
116 static int igmp_v1v2_queue_report(struct in_multi *, const int);
117 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
118 static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
119 static void igmp_v2_update_group(struct in_multi *, const int);
120 static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
121 static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
122 static struct mbuf *
123 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
124 static int igmp_v3_enqueue_group_record(struct ifqueue *,
125 struct in_multi *, const int, const int, const int);
126 static int igmp_v3_enqueue_filter_change(struct ifqueue *,
127 struct in_multi *);
128 static void igmp_v3_process_group_timers(struct igmp_ifinfo *,
129 struct ifqueue *, struct ifqueue *, struct in_multi *,
130 const int);
131 static int igmp_v3_merge_state_changes(struct in_multi *,
132 struct ifqueue *);
133 static void igmp_v3_suppress_group_record(struct in_multi *);
134 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
135 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
136 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
137
138 static const struct netisr_handler igmp_nh = {
139 .nh_name = "igmp",
140 .nh_handler = igmp_intr,
141 .nh_proto = NETISR_IGMP,
142 .nh_policy = NETISR_POLICY_SOURCE,
143 };
144
145 /*
146 * System-wide globals.
147 *
148 * Unlocked access to these is OK, except for the global IGMP output
149 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
150 * because all VIMAGEs have to share a global output queue, as netisrs
151 * themselves are not virtualized.
152 *
153 * Locking:
154 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
155 * Any may be taken independently; if any are held at the same
156 * time, the above lock order must be followed.
157 * * All output is delegated to the netisr.
158 * Now that Giant has been eliminated, the netisr may be inlined.
159 * * IN_MULTI_LOCK covers in_multi.
160 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file,
161 * including the output queue.
162 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
163 * per-link state iterators.
164 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface,
165 * therefore it is not refcounted.
166 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi.
167 *
168 * Reference counting
169 * * IGMP acquires its own reference every time an in_multi is passed to
170 * it and the group is being joined for the first time.
171 * * IGMP releases its reference(s) on in_multi in a deferred way,
172 * because the operations which process the release run as part of
173 * a loop whose control variables would be directly affected by an
174 * immediate release (and to avoid recursing on the IF_ADDR_LOCK).
175 *
176 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
177 * to a vnet in ifp->if_vnet.
178 *
179 * SMPng: XXX We may potentially race operations on ifma_protospec.
180 * The problem is that we currently lack a clean way of taking the
181 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
182 * as anything which modifies ifma needs to be covered by that lock.
183 * So check for ifma_protospec being NULL before proceeding.
184 */
185 struct mtx igmp_mtx;
186
187 struct mbuf *m_raopt; /* Router Alert option */
188 MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
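
/*
 * Illustrative sketch (not part of igmp.c proper): a reader that honours
 * the lock order documented above.  Each lock may be taken on its own,
 * but whenever they are held together they must nest in this order:
 *
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	IF_ADDR_RLOCK(ifp);
 *	... walk ifp->if_multiaddrs, inspect igmp_ifinfo ...
 *	IF_ADDR_RUNLOCK(ifp);
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 */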
189
190 /*
191 * VIMAGE-wide globals.
192 *
193 * The IGMPv3 timers themselves need to run per-image, however,
194 * protosw timers run globally (see tcp).
195 * An ifnet can only be in one vimage at a time, and the loopback
196 * ifnet, loif, is itself virtualized.
197 * It would otherwise be possible to seriously hose IGMP state,
198 * and create inconsistencies in upstream multicast routing, if you have
199 * multiple VIMAGEs running on the same link joining different multicast
200 * groups, UNLESS the "primary IP address" is different. This is because
201 * IGMP for IPv4 does not force link-local addresses to be used for each
202 * node, unlike MLD for IPv6.
203 * Obviously the IGMPv3 per-interface state has per-vimage granularity
204 * also as a result.
205 *
206 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
207 * policy to control the address used by IGMP on the link.
208 */
209 static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general
210 * query response */
211 static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change
212 * retransmit */
213 static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host
214 * report; IGMPv3 g/sg
215 * query response */
216
217 #define V_interface_timers_running VNET(interface_timers_running)
218 #define V_state_change_timers_running VNET(state_change_timers_running)
219 #define V_current_state_timers_running VNET(current_state_timers_running)
220
221 static VNET_DEFINE(LIST_HEAD(, igmp_ifinfo), igi_head);
222 static VNET_DEFINE(struct igmpstat, igmpstat) = {
223 .igps_version = IGPS_VERSION_3,
224 .igps_len = sizeof(struct igmpstat),
225 };
226 static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0};
227
228 #define V_igi_head VNET(igi_head)
229 #define V_igmpstat VNET(igmpstat)
230 #define V_igmp_gsrdelay VNET(igmp_gsrdelay)
231
232 static VNET_DEFINE(int, igmp_recvifkludge) = 1;
233 static VNET_DEFINE(int, igmp_sendra) = 1;
234 static VNET_DEFINE(int, igmp_sendlocal) = 1;
235 static VNET_DEFINE(int, igmp_v1enable) = 1;
236 static VNET_DEFINE(int, igmp_v2enable) = 1;
237 static VNET_DEFINE(int, igmp_legacysupp);
238 static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3;
239
240 #define V_igmp_recvifkludge VNET(igmp_recvifkludge)
241 #define V_igmp_sendra VNET(igmp_sendra)
242 #define V_igmp_sendlocal VNET(igmp_sendlocal)
243 #define V_igmp_v1enable VNET(igmp_v1enable)
244 #define V_igmp_v2enable VNET(igmp_v2enable)
245 #define V_igmp_legacysupp VNET(igmp_legacysupp)
246 #define V_igmp_default_version VNET(igmp_default_version)
247
248 /*
249 * Virtualized sysctls.
250 */
251 SYSCTL_VNET_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RW,
252 &VNET_NAME(igmpstat), igmpstat, "");
253 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW,
254 &VNET_NAME(igmp_recvifkludge), 0,
255 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
256 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW,
257 &VNET_NAME(igmp_sendra), 0,
258 "Send IP Router Alert option in IGMPv2/v3 messages");
259 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW,
260 &VNET_NAME(igmp_sendlocal), 0,
261 "Send IGMP membership reports for 224.0.0.0/24 groups");
262 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW,
263 &VNET_NAME(igmp_v1enable), 0,
264 "Enable backwards compatibility with IGMPv1");
265 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW,
266 &VNET_NAME(igmp_v2enable), 0,
267 "Enable backwards compatibility with IGMPv2");
268 SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW,
269 &VNET_NAME(igmp_legacysupp), 0,
270 "Allow v1/v2 reports to suppress v3 group responses");
271 SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, default_version,
272 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
273 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
274 "Default version of IGMP to run on each interface");
275 SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
276 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
277 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
278 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
279
280 /*
281 * Non-virtualized sysctls.
282 */
283 SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE,
284 sysctl_igmp_ifinfo, "Per-interface IGMPv3 state");
285
286 static __inline void
287 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
288 {
289
290 #ifdef VIMAGE
291 m->m_pkthdr.header = ifp->if_vnet;
292 #endif /* VIMAGE */
293 m->m_pkthdr.flowid = ifp->if_index;
294 }
295
296 static __inline void
297 igmp_scrub_context(struct mbuf *m)
298 {
299
300 m->m_pkthdr.header = NULL;
301 m->m_pkthdr.flowid = 0;
302 }
303
304 #ifdef KTR
305 static __inline char *
306 inet_ntoa_haddr(in_addr_t haddr)
307 {
308 struct in_addr ia;
309
310 ia.s_addr = htonl(haddr);
311 return (inet_ntoa(ia));
312 }
313 #endif
314
315 /*
316 * Restore context from a queued IGMP output chain.
317 * Return saved ifindex.
318 *
319 * VIMAGE: The assertion is there to make sure that we
320 * actually called CURVNET_SET() with what's in the mbuf chain.
321 */
322 static __inline uint32_t
323 igmp_restore_context(struct mbuf *m)
324 {
325
326 #ifdef notyet
327 #if defined(VIMAGE) && defined(INVARIANTS)
328 KASSERT(curvnet == (m->m_pkthdr.header),
329 ("%s: called when curvnet was not restored", __func__));
330 #endif
331 #endif
332 return (m->m_pkthdr.flowid);
333 }
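
/*
 * Illustrative sketch of the save/restore round trip (the real call
 * sites are the transmit paths and igmp_intr()):
 *
 *	igmp_save_context(m, ifp);		stash vnet and ifindex
 *	netisr_dispatch(NETISR_IGMP, m);	hand off to the netisr
 *	...
 *	ifindex = igmp_restore_context(m);	in igmp_intr()
 *	ifp = ifnet_byindex(ifindex);		recover the ifnet
 */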
334
335 /*
336 * Retrieve or set default IGMP version.
337 *
338 * VIMAGE: Assume curvnet set by caller.
339 * SMPng: NOTE: Serialized by IGMP lock.
340 */
341 static int
342 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
343 {
344 int error;
345 int new;
346
347 error = sysctl_wire_old_buffer(req, sizeof(int));
348 if (error)
349 return (error);
350
351 IGMP_LOCK();
352
353 new = V_igmp_default_version;
354
355 error = sysctl_handle_int(oidp, &new, 0, req);
356 if (error || !req->newptr)
357 goto out_locked;
358
359 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
360 error = EINVAL;
361 goto out_locked;
362 }
363
364 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
365 V_igmp_default_version, new);
366
367 V_igmp_default_version = new;
368
369 out_locked:
370 IGMP_UNLOCK();
371 return (error);
372 }
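
/*
 * Usage sketch, from userland; values outside 1..3 are rejected
 * with EINVAL:
 *
 *	# sysctl net.inet.igmp.default_version=2
 */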
373
374 /*
375 * Retrieve or set threshold between group-source queries in seconds.
376 *
377 * VIMAGE: Assume curvnet set by caller.
378 * SMPng: NOTE: Serialized by IGMP lock.
379 */
380 static int
381 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
382 {
383 int error;
384 int i;
385
386 error = sysctl_wire_old_buffer(req, sizeof(int));
387 if (error)
388 return (error);
389
390 IGMP_LOCK();
391
392 i = V_igmp_gsrdelay.tv_sec;
393
394 error = sysctl_handle_int(oidp, &i, 0, req);
395 if (error || !req->newptr)
396 goto out_locked;
397
398 if (i < -1 || i >= 60) {
399 error = EINVAL;
400 goto out_locked;
401 }
402
403 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
404 V_igmp_gsrdelay.tv_sec, i);
405 V_igmp_gsrdelay.tv_sec = i;
406
407 out_locked:
408 IGMP_UNLOCK();
409 return (error);
410 }
411
412 /*
413 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
414 * For use by ifmcstat(8).
415 *
416 * SMPng: NOTE: Does an unlocked ifindex space read.
417 * VIMAGE: Assume curvnet set by caller. The node handler itself
418 * is not directly virtualized.
419 */
420 static int
421 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
422 {
423 int *name;
424 int error;
425 u_int namelen;
426 struct ifnet *ifp;
427 struct igmp_ifinfo *igi;
428
429 name = (int *)arg1;
430 namelen = arg2;
431
432 if (req->newptr != NULL)
433 return (EPERM);
434
435 if (namelen != 1)
436 return (EINVAL);
437
438 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
439 if (error)
440 return (error);
441
442 IN_MULTI_LOCK();
443 IGMP_LOCK();
444
445 if (name[0] <= 0 || name[0] > V_if_index) {
446 error = ENOENT;
447 goto out_locked;
448 }
449
450 error = ENOENT;
451
452 ifp = ifnet_byindex(name[0]);
453 if (ifp == NULL)
454 goto out_locked;
455
456 LIST_FOREACH(igi, &V_igi_head, igi_link) {
457 if (ifp == igi->igi_ifp) {
458 error = SYSCTL_OUT(req, igi,
459 sizeof(struct igmp_ifinfo));
460 break;
461 }
462 }
463
464 out_locked:
465 IGMP_UNLOCK();
466 IN_MULTI_UNLOCK();
467 return (error);
468 }
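
/*
 * Illustrative userland sketch (error handling omitted; "em0" is only
 * an example name): resolve the node by name, append the ifindex as
 * the final MIB component, and read back a single igmp_ifinfo:
 *
 *	int mib[8];
 *	size_t miblen = 8, len;
 *	struct igmp_ifinfo igi;
 *
 *	sysctlnametomib("net.inet.igmp.ifinfo", mib, &miblen);
 *	mib[miblen] = if_nametoindex("em0");
 *	len = sizeof(igi);
 *	sysctl(mib, miblen + 1, &igi, &len, NULL, 0);
 */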
469
470 /*
471 * Dispatch an entire queue of pending packet chains
472 * using the netisr.
473 * VIMAGE: Assumes the vnet pointer has been set.
474 */
475 static void
476 igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop)
477 {
478 struct mbuf *m;
479
480 for (;;) {
481 _IF_DEQUEUE(ifq, m);
482 if (m == NULL)
483 break;
484 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, ifq);
485 if (loop)
486 m->m_flags |= M_IGMP_LOOP;
487 netisr_dispatch(NETISR_IGMP, m);
488 if (--limit == 0)
489 break;
490 }
491 }
492
493 /*
494 * Filter outgoing IGMP report state by group.
495 *
496 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
497 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
498 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
499 * this may break certain IGMP snooping switches which rely on the old
500 * report behaviour.
501 *
502 * Return zero if the given group is one for which IGMP reports
503 * should be suppressed, or non-zero if reports should be issued.
504 */
505 static __inline int
506 igmp_isgroupreported(const struct in_addr addr)
507 {
508
509 if (in_allhosts(addr) ||
510 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
511 return (0);
512
513 return (1);
514 }
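
/*
 * For example: reports for 224.0.0.1 (all-hosts) are always suppressed,
 * and with net.inet.igmp.sendlocal=0 a group such as 224.0.0.251 (mDNS)
 * is suppressed as well, since it falls within the 224.0.0.0/24
 * link-local scope.
 */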
515
516 /*
517 * Construct a Router Alert option to use in outgoing packets.
518 */
519 static struct mbuf *
520 igmp_ra_alloc(void)
521 {
522 struct mbuf *m;
523 struct ipoption *p;
524
525 MGET(m, M_WAITOK, MT_DATA);	/* called at init time; M_WAITOK cannot fail */
526 p = mtod(m, struct ipoption *);
527 p->ipopt_dst.s_addr = INADDR_ANY;
528 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
529 p->ipopt_list[1] = 0x04; /* 4 bytes long */
530 p->ipopt_list[2] = IPOPT_EOL;	/* 16-bit option value: zero means */
531 p->ipopt_list[3] = 0x00;	/* "router shall examine packet" */
532 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
533
534 return (m);
535 }
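
/*
 * The resulting option block is 8 bytes: the 4-byte ipopt_dst of
 * INADDR_ANY, followed by the Router Alert option itself, which
 * appears on the wire as 0x94 0x04 0x00 0x00 (type 148, length 4,
 * 16-bit value 0 = "router shall examine packet"; see RFC 2113).
 */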
536
537 /*
538 * Attach IGMP when PF_INET is attached to an interface.
539 */
540 struct igmp_ifinfo *
541 igmp_domifattach(struct ifnet *ifp)
542 {
543 struct igmp_ifinfo *igi;
544
545 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
546 __func__, ifp, ifp->if_xname);
547
548 IGMP_LOCK();
549
550 igi = igi_alloc_locked(ifp);
551 if (!(ifp->if_flags & IFF_MULTICAST))
552 igi->igi_flags |= IGIF_SILENT;
553
554 IGMP_UNLOCK();
555
556 return (igi);
557 }
558
559 /*
560 * VIMAGE: assume curvnet set by caller.
561 */
562 static struct igmp_ifinfo *
563 igi_alloc_locked(/*const*/ struct ifnet *ifp)
564 {
565 struct igmp_ifinfo *igi;
566
567 IGMP_LOCK_ASSERT();
568
569 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO);
570 if (igi == NULL)
571 goto out;
572
573 igi->igi_ifp = ifp;
574 igi->igi_version = V_igmp_default_version;
575 igi->igi_flags = 0;
576 igi->igi_rv = IGMP_RV_INIT;
577 igi->igi_qi = IGMP_QI_INIT;
578 igi->igi_qri = IGMP_QRI_INIT;
579 igi->igi_uri = IGMP_URI_INIT;
580
581 SLIST_INIT(&igi->igi_relinmhead);
582
583 /*
584 * Responses to general queries are subject to bounds.
585 */
586 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
587
588 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
589
590 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)",
591 ifp, ifp->if_xname);
592
593 out:
594 return (igi);
595 }
596
597 /*
598 * Hook for ifdetach.
599 *
600 * NOTE: Some finalization tasks need to run before the protocol domain
601 * is detached, but also before the link layer does its cleanup.
602 *
603 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
604 * XXX This is also bitten by unlocked ifma_protospec access.
605 */
606 void
607 igmp_ifdetach(struct ifnet *ifp)
608 {
609 struct igmp_ifinfo *igi;
610 struct ifmultiaddr *ifma;
611 struct in_multi *inm, *tinm;
612
613 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
614 ifp->if_xname);
615
616 IGMP_LOCK();
617
618 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
619 if (igi->igi_version == IGMP_VERSION_3) {
620 IF_ADDR_RLOCK(ifp);
621 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
622 if (ifma->ifma_addr->sa_family != AF_INET ||
623 ifma->ifma_protospec == NULL)
624 continue;
625 #if 0
626 KASSERT(ifma->ifma_protospec != NULL,
627 ("%s: ifma_protospec is NULL", __func__));
628 #endif
629 inm = (struct in_multi *)ifma->ifma_protospec;
630 if (inm->inm_state == IGMP_LEAVING_MEMBER) {
631 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
632 inm, inm_nrele);
633 }
634 inm_clear_recorded(inm);
635 }
636 IF_ADDR_RUNLOCK(ifp);
637 /*
638 * Free the in_multi reference(s) for this IGMP lifecycle.
639 */
640 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
641 tinm) {
642 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
643 inm_release_locked(inm);
644 }
645 }
646
647 IGMP_UNLOCK();
648 }
649
650 /*
651 * Hook for domifdetach.
652 */
653 void
654 igmp_domifdetach(struct ifnet *ifp)
655 {
656 struct igmp_ifinfo *igi;
657
658 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
659 __func__, ifp, ifp->if_xname);
660
661 IGMP_LOCK();
662
663 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
664 igi_delete_locked(ifp);
665
666 IGMP_UNLOCK();
667 }
668
669 static void
670 igi_delete_locked(const struct ifnet *ifp)
671 {
672 struct igmp_ifinfo *igi, *tigi;
673
674 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)",
675 __func__, ifp, ifp->if_xname);
676
677 IGMP_LOCK_ASSERT();
678
679 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
680 if (igi->igi_ifp == ifp) {
681 /*
682 * Free deferred General Query responses.
683 */
684 _IF_DRAIN(&igi->igi_gq);
685
686 LIST_REMOVE(igi, igi_link);
687
688 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
689 ("%s: there are dangling in_multi references",
690 __func__));
691
692 free(igi, M_IGMP);
693 return;
694 }
695 }
696
697 #ifdef INVARIANTS
698 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp);
699 #endif
700 }
701
702 /*
703 * Process a received IGMPv1 query.
704 * Return non-zero if the message should be dropped.
705 *
706 * VIMAGE: The curvnet pointer is derived from the input ifp.
707 */
708 static int
709 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
710 const struct igmp *igmp)
711 {
712 struct ifmultiaddr *ifma;
713 struct igmp_ifinfo *igi;
714 struct in_multi *inm;
715
716 /*
717 * IGMPv1 Host Membership Queries SHOULD always be addressed to
718 * 224.0.0.1. They are always treated as General Queries.
719 * igmp_group is always ignored. Do not drop it as a userland
720 * daemon may wish to see it.
721 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
722 */
723 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
724 IGMPSTAT_INC(igps_rcv_badqueries);
725 return (0);
726 }
727 IGMPSTAT_INC(igps_rcv_gen_queries);
728
729 IN_MULTI_LOCK();
730 IGMP_LOCK();
731
732 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
733 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
734
735 if (igi->igi_flags & IGIF_LOOPBACK) {
736 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
737 ifp, ifp->if_xname);
738 goto out_locked;
739 }
740
741 /*
742 * Switch to IGMPv1 host compatibility mode.
743 */
744 igmp_set_version(igi, IGMP_VERSION_1);
745
746 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
747
748 /*
749 * Start the timers in all of our group records
750 * for the interface on which the query arrived,
751 * except those which are already running.
752 */
753 IF_ADDR_RLOCK(ifp);
754 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
755 if (ifma->ifma_addr->sa_family != AF_INET ||
756 ifma->ifma_protospec == NULL)
757 continue;
758 inm = (struct in_multi *)ifma->ifma_protospec;
759 if (inm->inm_timer != 0)
760 continue;
761 switch (inm->inm_state) {
762 case IGMP_NOT_MEMBER:
763 case IGMP_SILENT_MEMBER:
764 break;
765 case IGMP_G_QUERY_PENDING_MEMBER:
766 case IGMP_SG_QUERY_PENDING_MEMBER:
767 case IGMP_REPORTING_MEMBER:
768 case IGMP_IDLE_MEMBER:
769 case IGMP_LAZY_MEMBER:
770 case IGMP_SLEEPING_MEMBER:
771 case IGMP_AWAKENING_MEMBER:
772 inm->inm_state = IGMP_REPORTING_MEMBER;
773 inm->inm_timer = IGMP_RANDOM_DELAY(
774 IGMP_V1V2_MAX_RI * PR_FASTHZ);
775 V_current_state_timers_running = 1;
776 break;
777 case IGMP_LEAVING_MEMBER:
778 break;
779 }
780 }
781 IF_ADDR_RUNLOCK(ifp);
782
783 out_locked:
784 IGMP_UNLOCK();
785 IN_MULTI_UNLOCK();
786
787 return (0);
788 }
789
790 /*
791 * Process a received IGMPv2 general or group-specific query.
792 */
793 static int
794 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
795 const struct igmp *igmp)
796 {
797 struct ifmultiaddr *ifma;
798 struct igmp_ifinfo *igi;
799 struct in_multi *inm;
800 int is_general_query;
801 uint16_t timer;
802
803 is_general_query = 0;
804
805 /*
806 * Validate address fields upfront.
807 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
808 */
809 if (in_nullhost(igmp->igmp_group)) {
810 /*
811 * IGMPv2 General Query.
812 * If this was not sent to the all-hosts group, ignore it.
813 */
814 if (!in_allhosts(ip->ip_dst))
815 return (0);
816 IGMPSTAT_INC(igps_rcv_gen_queries);
817 is_general_query = 1;
818 } else {
819 /* IGMPv2 Group-Specific Query. */
820 IGMPSTAT_INC(igps_rcv_group_queries);
821 }
822
823 IN_MULTI_LOCK();
824 IGMP_LOCK();
825
826 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
827 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
828
829 if (igi->igi_flags & IGIF_LOOPBACK) {
830 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
831 ifp, ifp->if_xname);
832 goto out_locked;
833 }
834
835 /*
836 * Ignore v2 query if in v1 Compatibility Mode.
837 */
838 if (igi->igi_version == IGMP_VERSION_1)
839 goto out_locked;
840
841 igmp_set_version(igi, IGMP_VERSION_2);
842
843 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
844 if (timer == 0)
845 timer = 1;
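	/*
	 * Worked example: igmp_code = 100 (10.0 seconds, as the code is
	 * in units of 1/IGMP_TIMER_SCALE = 1/10 second) with PR_FASTHZ = 5
	 * fast ticks per second yields 100 * 5 / 10 = 50 fast ticks.
	 */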
846
847 if (is_general_query) {
848 /*
849 * For each reporting group joined on this
850 * interface, kick the report timer.
851 */
852 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
853 ifp, ifp->if_xname);
854 IF_ADDR_RLOCK(ifp);
855 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
856 if (ifma->ifma_addr->sa_family != AF_INET ||
857 ifma->ifma_protospec == NULL)
858 continue;
859 inm = (struct in_multi *)ifma->ifma_protospec;
860 igmp_v2_update_group(inm, timer);
861 }
862 IF_ADDR_RUNLOCK(ifp);
863 } else {
864 /*
865 * Group-specific IGMPv2 query, we need only
866 * look up the single group to process it.
867 */
868 inm = inm_lookup(ifp, igmp->igmp_group);
869 if (inm != NULL) {
870 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)",
871 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
872 igmp_v2_update_group(inm, timer);
873 }
874 }
875
876 out_locked:
877 IGMP_UNLOCK();
878 IN_MULTI_UNLOCK();
879
880 return (0);
881 }
882
883 /*
884 * Update the report timer on a group in response to an IGMPv2 query.
885 *
886 * If we are becoming the reporting member for this group, start the timer.
887 * If we already are the reporting member for this group, and timer is
888 * below the threshold, reset it.
889 *
890 * We may be updating the group for the first time since we switched
891 * to IGMPv3. If we are, then we must clear any recorded source lists,
892 * and transition to REPORTING state; the group timer is overloaded
893 * for group and group-source query responses.
894 *
895 * Unlike IGMPv3, the delay per group should be jittered
896 * to avoid bursts of IGMPv2 reports.
897 */
898 static void
899 igmp_v2_update_group(struct in_multi *inm, const int timer)
900 {
901
902 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__,
903 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer);
904
905 IN_MULTI_LOCK_ASSERT();
906
907 switch (inm->inm_state) {
908 case IGMP_NOT_MEMBER:
909 case IGMP_SILENT_MEMBER:
910 break;
911 case IGMP_REPORTING_MEMBER:
912 if (inm->inm_timer != 0 &&
913 inm->inm_timer <= timer) {
914 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
915 "skipping.", __func__);
916 break;
917 }
918 /* FALLTHROUGH */
919 case IGMP_SG_QUERY_PENDING_MEMBER:
920 case IGMP_G_QUERY_PENDING_MEMBER:
921 case IGMP_IDLE_MEMBER:
922 case IGMP_LAZY_MEMBER:
923 case IGMP_AWAKENING_MEMBER:
924 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
925 inm->inm_state = IGMP_REPORTING_MEMBER;
926 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
927 V_current_state_timers_running = 1;
928 break;
929 case IGMP_SLEEPING_MEMBER:
930 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
931 inm->inm_state = IGMP_AWAKENING_MEMBER;
932 break;
933 case IGMP_LEAVING_MEMBER:
934 break;
935 }
936 }
937
938 /*
939 * Process a received IGMPv3 general, group-specific or
940 * group-and-source-specific query.
941 * Assumes m has already been pulled up to the full IGMP message length.
942 * Return 0 if successful, otherwise an appropriate error code is returned.
943 */
944 static int
945 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
946 /*const*/ struct igmpv3 *igmpv3)
947 {
948 struct igmp_ifinfo *igi;
949 struct in_multi *inm;
950 int is_general_query;
951 uint32_t maxresp, nsrc, qqi;
952 uint16_t timer;
953 uint8_t qrv;
954
955 is_general_query = 0;
956
957 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
958
959 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
960 if (maxresp >= 128) {
961 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
962 (IGMP_EXP(igmpv3->igmp_code) + 3);
963 }
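	/*
	 * Per RFC 3376 Section 4.1.1, codes >= 128 are floating point,
	 * |1|exp(3)|mant(4)|, representing (mant | 0x10) << (exp + 3)
	 * tenths of a second; the IGMP_MANT() and IGMP_EXP() macros
	 * from igmp_var.h extract the mantissa and exponent fields.
	 */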
964
965 /*
966 * Robustness must never be less than 2 for on-wire IGMPv3.
967 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
968 * an exception for interfaces whose IGMPv3 state changes
969 * are redirected to loopback (e.g. MANET).
970 */
971 qrv = IGMP_QRV(igmpv3->igmp_misc);
972 if (qrv < 2) {
973 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
974 qrv, IGMP_RV_INIT);
975 qrv = IGMP_RV_INIT;
976 }
977
978 qqi = igmpv3->igmp_qqi;
979 if (qqi >= 128) {
980 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
981 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
982 }
983
984 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
985 if (timer == 0)
986 timer = 1;
987
988 nsrc = ntohs(igmpv3->igmp_numsrc);
989
990 /*
991 * Validate address fields and versions upfront before
992 * accepting v3 query.
993 * XXX SMPng: Unlocked access to igmpstat counters here.
994 */
995 if (in_nullhost(igmpv3->igmp_group)) {
996 /*
997 * IGMPv3 General Query.
998 *
999 * General Queries SHOULD be directed to 224.0.0.1.
1000 * A general query with a source list has undefined
1001 * behaviour; discard it.
1002 */
1003 IGMPSTAT_INC(igps_rcv_gen_queries);
1004 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
1005 IGMPSTAT_INC(igps_rcv_badqueries);
1006 return (0);
1007 }
1008 is_general_query = 1;
1009 } else {
1010 /* Group or group-source specific query. */
1011 if (nsrc == 0)
1012 IGMPSTAT_INC(igps_rcv_group_queries);
1013 else
1014 IGMPSTAT_INC(igps_rcv_gsr_queries);
1015 }
1016
1017 IN_MULTI_LOCK();
1018 IGMP_LOCK();
1019
1020 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1021 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
1022
1023 if (igi->igi_flags & IGIF_LOOPBACK) {
1024 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1025 ifp, ifp->if_xname);
1026 goto out_locked;
1027 }
1028
1029 /*
1030 * Discard the v3 query if we're in Compatibility Mode.
1031 * The RFC is not explicit on this point, but hosts must remain in
1032 * compatibility mode until the Old Version Querier Present
1033 * timer expires.
1034 */
1035 if (igi->igi_version != IGMP_VERSION_3) {
1036 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1037 igi->igi_version, ifp, ifp->if_xname);
1038 goto out_locked;
1039 }
1040
1041 igmp_set_version(igi, IGMP_VERSION_3);
1042 igi->igi_rv = qrv;
1043 igi->igi_qi = qqi;
1044 igi->igi_qri = maxresp;
1045
1046 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1047 maxresp);
1048
1049 if (is_general_query) {
1050 /*
1051 * Schedule a current-state report on this ifp for
1052 * all groups, possibly containing source lists.
1053 * If there is a pending General Query response
1054 * scheduled earlier than the selected delay, do
1055 * not schedule any other reports.
1056 * Otherwise, reset the interface timer.
1057 */
1058 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1059 ifp, ifp->if_xname);
1060 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1061 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1062 V_interface_timers_running = 1;
1063 }
1064 } else {
1065 /*
1066 * Group-source-specific queries are throttled on
1067 * a per-group basis to defeat denial-of-service attempts.
1068 * Queries for groups we are not a member of on this
1069 * link are simply ignored.
1070 */
1071 inm = inm_lookup(ifp, igmpv3->igmp_group);
1072 if (inm == NULL)
1073 goto out_locked;
1074 if (nsrc > 0) {
1075 if (!ratecheck(&inm->inm_lastgsrtv,
1076 &V_igmp_gsrdelay)) {
1077 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1078 __func__);
1079 IGMPSTAT_INC(igps_drop_gsr_queries);
1080 goto out_locked;
1081 }
1082 }
1083 CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
1084 inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
1085 /*
1086 * If there is a pending General Query response
1087 * scheduled sooner than the selected delay, no
1088 * further report need be scheduled.
1089 * Otherwise, prepare to respond to the
1090 * group-specific or group-and-source query.
1091 */
1092 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1093 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1094 }
1095
1096 out_locked:
1097 IGMP_UNLOCK();
1098 IN_MULTI_UNLOCK();
1099
1100 return (0);
1101 }
1102
1103 /*
1104 * Process a received IGMPv3 group-specific or group-and-source-specific
1105 * query.
1106 * Return <0 if any error occurred. Currently this is ignored.
1107 */
1108 static int
1109 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
1110 int timer, /*const*/ struct igmpv3 *igmpv3)
1111 {
1112 int retval;
1113 uint16_t nsrc;
1114
1115 IN_MULTI_LOCK_ASSERT();
1116 IGMP_LOCK_ASSERT();
1117
1118 retval = 0;
1119
1120 switch (inm->inm_state) {
1121 case IGMP_NOT_MEMBER:
1122 case IGMP_SILENT_MEMBER:
1123 case IGMP_SLEEPING_MEMBER:
1124 case IGMP_LAZY_MEMBER:
1125 case IGMP_AWAKENING_MEMBER:
1126 case IGMP_IDLE_MEMBER:
1127 case IGMP_LEAVING_MEMBER:
1128 return (retval);
1129 break;
1130 case IGMP_REPORTING_MEMBER:
1131 case IGMP_G_QUERY_PENDING_MEMBER:
1132 case IGMP_SG_QUERY_PENDING_MEMBER:
1133 break;
1134 }
1135
1136 nsrc = ntohs(igmpv3->igmp_numsrc);
1137
1138 /*
1139 * Deal with group-specific queries upfront.
1140 * If any group query is already pending, purge any recorded
1141 * source-list state if it exists, and schedule a query response
1142 * for this group-specific query.
1143 */
1144 if (nsrc == 0) {
1145 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1146 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1147 inm_clear_recorded(inm);
1148 timer = min(inm->inm_timer, timer);
1149 }
1150 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1151 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1152 V_current_state_timers_running = 1;
1153 return (retval);
1154 }
1155
1156 /*
1157 * Deal with the case where a group-and-source-specific query has
1158 * been received but a group-specific query is already pending.
1159 */
1160 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1161 timer = min(inm->inm_timer, timer);
1162 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1163 V_current_state_timers_running = 1;
1164 return (retval);
1165 }
1166
1167 /*
1168 * Finally, deal with the case where a group-and-source-specific
1169 * query has been received, where a response to a previous g-s-r
1170 * query exists, or none exists.
1171 * In this case, we need to parse the source-list which the Querier
1172 * has provided us with and check if we have any source list filter
1173 * entries at T1 for these sources. If we do not, there is no need
1174 * to schedule a report and the query may be dropped.
1175 * If we do, we must record them and schedule a current-state
1176 * report for those sources.
1177 * FIXME: Handling source lists larger than 1 mbuf requires that
1178 * we pass the mbuf chain pointer down to this function, and use
1179 * m_getptr() to walk the chain.
1180 */
1181 if (inm->inm_nsrc > 0) {
1182 const struct in_addr *ap;
1183 int i, nrecorded;
1184
1185 ap = (const struct in_addr *)(igmpv3 + 1);
1186 nrecorded = 0;
1187 for (i = 0; i < nsrc; i++, ap++) {
1188 retval = inm_record_source(inm, ap->s_addr);
1189 if (retval < 0)
1190 break;
1191 nrecorded += retval;
1192 }
1193 if (nrecorded > 0) {
1194 CTR1(KTR_IGMPV3,
1195 "%s: schedule response to SG query", __func__);
1196 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1197 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1198 V_current_state_timers_running = 1;
1199 }
1200 }
1201
1202 return (retval);
1203 }
1204
1205 /*
1206 * Process a received IGMPv1 host membership report.
1207 *
1208 * NOTE: 0.0.0.0 workaround breaks const correctness.
1209 */
1210 static int
1211 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1212 /*const*/ struct igmp *igmp)
1213 {
1214 struct in_ifaddr *ia;
1215 struct in_multi *inm;
1216
1217 IGMPSTAT_INC(igps_rcv_reports);
1218
1219 if (ifp->if_flags & IFF_LOOPBACK)
1220 return (0);
1221
1222 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1223 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1224 IGMPSTAT_INC(igps_rcv_badreports);
1225 return (EINVAL);
1226 }
1227
1228 /*
1229 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1230 * Booting clients may use the source address 0.0.0.0. Some
1231 * IGMP daemons may not know how to use IP_RECVIF to determine
1232 * the interface upon which this message was received.
1233 * Replace 0.0.0.0 with the subnet address if told to do so.
1234 */
1235 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1236 IFP_TO_IA(ifp, ia);
1237 if (ia != NULL) {
1238 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1239 ifa_free(&ia->ia_ifa);
1240 }
1241 }
1242
1243 CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
1244 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1245
1246 /*
1247 * IGMPv1 report suppression.
1248 * If we are a member of this group, and our membership should be
1249 * reported, stop our group timer and transition to the 'lazy' state.
1250 */
1251 IN_MULTI_LOCK();
1252 inm = inm_lookup(ifp, igmp->igmp_group);
1253 if (inm != NULL) {
1254 struct igmp_ifinfo *igi;
1255
1256 igi = inm->inm_igi;
1257 if (igi == NULL) {
1258 KASSERT(igi != NULL,
1259 ("%s: no igi for ifp %p", __func__, ifp));
1260 goto out_locked;
1261 }
1262
1263 IGMPSTAT_INC(igps_rcv_ourreports);
1264
1265 /*
1266 * If we are in IGMPv3 host mode, do not allow the
1267 * other host's IGMPv1 report to suppress our reports
1268 * unless explicitly configured to do so.
1269 */
1270 if (igi->igi_version == IGMP_VERSION_3) {
1271 if (V_igmp_legacysupp)
1272 igmp_v3_suppress_group_record(inm);
1273 goto out_locked;
1274 }
1275
1276 inm->inm_timer = 0;
1277
1278 switch (inm->inm_state) {
1279 case IGMP_NOT_MEMBER:
1280 case IGMP_SILENT_MEMBER:
1281 break;
1282 case IGMP_IDLE_MEMBER:
1283 case IGMP_LAZY_MEMBER:
1284 case IGMP_AWAKENING_MEMBER:
1285 CTR3(KTR_IGMPV3,
1286 "report suppressed for %s on ifp %p(%s)",
1287 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
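/* FALLTHROUGH */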
1288 case IGMP_SLEEPING_MEMBER:
1289 inm->inm_state = IGMP_SLEEPING_MEMBER;
1290 break;
1291 case IGMP_REPORTING_MEMBER:
1292 CTR3(KTR_IGMPV3,
1293 "report suppressed for %s on ifp %p(%s)",
1294 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1295 if (igi->igi_version == IGMP_VERSION_1)
1296 inm->inm_state = IGMP_LAZY_MEMBER;
1297 else if (igi->igi_version == IGMP_VERSION_2)
1298 inm->inm_state = IGMP_SLEEPING_MEMBER;
1299 break;
1300 case IGMP_G_QUERY_PENDING_MEMBER:
1301 case IGMP_SG_QUERY_PENDING_MEMBER:
1302 case IGMP_LEAVING_MEMBER:
1303 break;
1304 }
1305 }
1306
1307 out_locked:
1308 IN_MULTI_UNLOCK();
1309
1310 return (0);
1311 }
1312
1313 /*
1314 * Process a received IGMPv2 host membership report.
1315 *
1316 * NOTE: 0.0.0.0 workaround breaks const correctness.
1317 */
1318 static int
1319 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1320 /*const*/ struct igmp *igmp)
1321 {
1322 struct in_ifaddr *ia;
1323 struct in_multi *inm;
1324
1325 /*
1326 * Make sure we don't hear our own membership report. Fast
1327 * leave requires knowing that we are the only member of a
1328 * group.
1329 */
1330 IFP_TO_IA(ifp, ia);
1331 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1332 ifa_free(&ia->ia_ifa);
1333 return (0);
1334 }
1335
1336 IGMPSTAT_INC(igps_rcv_reports);
1337
1338 if (ifp->if_flags & IFF_LOOPBACK) {
1339 if (ia != NULL)
1340 ifa_free(&ia->ia_ifa);
1341 return (0);
1342 }
1343
1344 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1345 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1346 if (ia != NULL)
1347 ifa_free(&ia->ia_ifa);
1348 IGMPSTAT_INC(igps_rcv_badreports);
1349 return (EINVAL);
1350 }
1351
1352 /*
1353 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1354 * Booting clients may use the source address 0.0.0.0. Some
1355 * IGMP daemons may not know how to use IP_RECVIF to determine
1356 * the interface upon which this message was received.
1357 * Replace 0.0.0.0 with the subnet address if told to do so.
1358 */
1359 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1360 if (ia != NULL)
1361 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1362 }
1363 if (ia != NULL)
1364 ifa_free(&ia->ia_ifa);
1365
1366 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)",
1367 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1368
1369 /*
1370 * IGMPv2 report suppression.
1371 * If we are a member of this group, and our membership should be
1372 * reported, and our group timer is pending or about to be reset,
1373 * stop our group timer by transitioning to the 'lazy' state.
1374 */
1375 IN_MULTI_LOCK();
1376 inm = inm_lookup(ifp, igmp->igmp_group);
1377 if (inm != NULL) {
1378 struct igmp_ifinfo *igi;
1379
1380 igi = inm->inm_igi;
1381 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1382
1383 IGMPSTAT_INC(igps_rcv_ourreports);
1384
1385 /*
1386 * If we are in IGMPv3 host mode, do not allow the
1387 * other host's IGMPv2 report to suppress our reports
1388 * unless explicitly configured to do so.
1389 */
1390 if (igi->igi_version == IGMP_VERSION_3) {
1391 if (V_igmp_legacysupp)
1392 igmp_v3_suppress_group_record(inm);
1393 goto out_locked;
1394 }
1395
1396 inm->inm_timer = 0;
1397
1398 switch (inm->inm_state) {
1399 case IGMP_NOT_MEMBER:
1400 case IGMP_SILENT_MEMBER:
1401 case IGMP_SLEEPING_MEMBER:
1402 break;
1403 case IGMP_REPORTING_MEMBER:
1404 case IGMP_IDLE_MEMBER:
1405 case IGMP_AWAKENING_MEMBER:
1406 CTR3(KTR_IGMPV3,
1407 "report suppressed for %s on ifp %p(%s)",
1408 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
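/* FALLTHROUGH */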
1409 case IGMP_LAZY_MEMBER:
1410 inm->inm_state = IGMP_LAZY_MEMBER;
1411 break;
1412 case IGMP_G_QUERY_PENDING_MEMBER:
1413 case IGMP_SG_QUERY_PENDING_MEMBER:
1414 case IGMP_LEAVING_MEMBER:
1415 break;
1416 }
1417 }
1418
1419 out_locked:
1420 IN_MULTI_UNLOCK();
1421
1422 return (0);
1423 }
1424
1425 void
1426 igmp_input(struct mbuf *m, int off)
1427 {
1428 int iphlen;
1429 struct ifnet *ifp;
1430 struct igmp *igmp;
1431 struct ip *ip;
1432 int igmplen;
1433 int minlen;
1434 int queryver;
1435
1436 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1437
1438 ifp = m->m_pkthdr.rcvif;
1439
1440 IGMPSTAT_INC(igps_rcv_total);
1441
1442 ip = mtod(m, struct ip *);
1443 iphlen = off;
1444 igmplen = ip->ip_len;
1445
1446 /*
1447 * Validate lengths.
1448 */
1449 if (igmplen < IGMP_MINLEN) {
1450 IGMPSTAT_INC(igps_rcv_tooshort);
1451 m_freem(m);
1452 return;
1453 }
1454
1455 /*
1456 * Always pullup to the minimum size for v1/v2 or v3
1457 * to amortize calls to m_pullup().
1458 */
1459 minlen = iphlen;
1460 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1461 minlen += IGMP_V3_QUERY_MINLEN;
1462 else
1463 minlen += IGMP_MINLEN;
1464 if ((m->m_flags & M_EXT || m->m_len < minlen) &&
1465 (m = m_pullup(m, minlen)) == NULL) {
1466 IGMPSTAT_INC(igps_rcv_tooshort);
1467 return;
1468 }
1469 ip = mtod(m, struct ip *);
1470
1471 /*
1472 * Validate checksum.
1473 */
1474 m->m_data += iphlen;
1475 m->m_len -= iphlen;
1476 igmp = mtod(m, struct igmp *);
1477 if (in_cksum(m, igmplen)) {
1478 IGMPSTAT_INC(igps_rcv_badsum);
1479 m_freem(m);
1480 return;
1481 }
1482 m->m_data -= iphlen;
1483 m->m_len += iphlen;
1484
1485 /*
1486 * IGMP control traffic is link-scope, and must have a TTL of 1.
1487 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1488 * probe packets may come from beyond the LAN.
1489 */
1490 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1491 IGMPSTAT_INC(igps_rcv_badttl);
1492 m_freem(m);
1493 return;
1494 }
1495
1496 switch (igmp->igmp_type) {
1497 case IGMP_HOST_MEMBERSHIP_QUERY:
1498 if (igmplen == IGMP_MINLEN) {
1499 if (igmp->igmp_code == 0)
1500 queryver = IGMP_VERSION_1;
1501 else
1502 queryver = IGMP_VERSION_2;
1503 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1504 queryver = IGMP_VERSION_3;
1505 } else {
1506 IGMPSTAT_INC(igps_rcv_tooshort);
1507 m_freem(m);
1508 return;
1509 }
1510
1511 switch (queryver) {
1512 case IGMP_VERSION_1:
1513 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1514 if (!V_igmp_v1enable)
1515 break;
1516 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1517 m_freem(m);
1518 return;
1519 }
1520 break;
1521
1522 case IGMP_VERSION_2:
1523 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1524 if (!V_igmp_v2enable)
1525 break;
1526 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1527 m_freem(m);
1528 return;
1529 }
1530 break;
1531
1532 case IGMP_VERSION_3: {
1533 struct igmpv3 *igmpv3;
1534 uint16_t igmpv3len;
1535 uint16_t srclen;
1536 int nsrc;
1537
1538 IGMPSTAT_INC(igps_rcv_v3_queries);
1539 igmpv3 = (struct igmpv3 *)igmp;
1540 /*
1541 * Validate length based on source count.
1542 */
1543 nsrc = ntohs(igmpv3->igmp_numsrc);
1544 srclen = sizeof(struct in_addr) * nsrc;
1545 if (nsrc * sizeof(in_addr_t) > srclen) {
1546 IGMPSTAT_INC(igps_rcv_tooshort);
m_freem(m);	/* not consumed by m_pullup() here; avoid leaking it */
1547 return;
1548 }
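/*
 * Note: srclen is a uint16_t, so for nsrc >= 16384 the product
 * above exceeds the truncated srclen and the query is rejected;
 * this bounds the claimed source count to what a 16-bit byte
 * length can describe.
 */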
1549 /*
1550 * m_pullup() may modify m, so pullup in
1551 * this scope.
1552 */
1553 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1554 srclen;
1555 if ((m->m_flags & M_EXT ||
1556 m->m_len < igmpv3len) &&
1557 (m = m_pullup(m, igmpv3len)) == NULL) {
1558 IGMPSTAT_INC(igps_rcv_tooshort);
1559 return;
1560 }
1561 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1562 + iphlen);
1563 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1564 m_freem(m);
1565 return;
1566 }
1567 }
1568 break;
1569 }
1570 break;
1571
1572 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1573 if (!V_igmp_v1enable)
1574 break;
1575 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1576 m_freem(m);
1577 return;
1578 }
1579 break;
1580
1581 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1582 if (!V_igmp_v2enable)
1583 break;
1584 if (!ip_checkrouteralert(m))
1585 IGMPSTAT_INC(igps_rcv_nora);
1586 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1587 m_freem(m);
1588 return;
1589 }
1590 break;
1591
1592 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1593 /*
1594 * Hosts do not need to process IGMPv3 membership reports,
1595 * as report suppression is no longer required.
1596 */
1597 if (!ip_checkrouteralert(m))
1598 IGMPSTAT_INC(igps_rcv_nora);
1599 break;
1600
1601 default:
1602 break;
1603 }
1604
1605 /*
1606 * Pass all valid IGMP packets up to any process(es) listening on a
1607 * raw IGMP socket.
1608 */
1609 rip_input(m, off);
1610 }
1611
1612
1613 /*
1614 * Fast timeout handler (global).
1615 * VIMAGE: Timeout handlers are expected to service all vimages.
1616 */
1617 void
1618 igmp_fasttimo(void)
1619 {
1620 VNET_ITERATOR_DECL(vnet_iter);
1621
1622 VNET_LIST_RLOCK_NOSLEEP();
1623 VNET_FOREACH(vnet_iter) {
1624 CURVNET_SET(vnet_iter);
1625 igmp_fasttimo_vnet();
1626 CURVNET_RESTORE();
1627 }
1628 VNET_LIST_RUNLOCK_NOSLEEP();
1629 }
1630
1631 /*
1632 * Fast timeout handler (per-vnet).
1633 * Sends are shuffled off to a netisr to deal with Giant.
1634 *
1635 * VIMAGE: Assume caller has set up our curvnet.
1636 */
1637 static void
1638 igmp_fasttimo_vnet(void)
1639 {
1640 struct ifqueue scq; /* State-change packets */
1641 struct ifqueue qrq; /* Query response packets */
1642 struct ifnet *ifp;
1643 struct igmp_ifinfo *igi;
1644 struct ifmultiaddr *ifma;
1645 struct in_multi *inm;
1646 int loop, uri_fasthz;
1647
1648 loop = 0;
1649 uri_fasthz = 0;
1650
1651 /*
1652 * Quick check to see if any work needs to be done, in order to
1653 * minimize the overhead of fasttimo processing.
1654 * SMPng: XXX Unlocked reads.
1655 */
1656 if (!V_current_state_timers_running &&
1657 !V_interface_timers_running &&
1658 !V_state_change_timers_running)
1659 return;
1660
1661 IN_MULTI_LOCK();
1662 IGMP_LOCK();
1663
1664 /*
1665 * IGMPv3 General Query response timer processing.
1666 */
1667 if (V_interface_timers_running) {
1668 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1669
1670 V_interface_timers_running = 0;
1671 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1672 if (igi->igi_v3_timer == 0) {
1673 /* Do nothing. */
1674 } else if (--igi->igi_v3_timer == 0) {
1675 igmp_v3_dispatch_general_query(igi);
1676 } else {
1677 V_interface_timers_running = 1;
1678 }
1679 }
1680 }
1681
1682 if (!V_current_state_timers_running &&
1683 !V_state_change_timers_running)
1684 goto out_locked;
1685
1686 V_current_state_timers_running = 0;
1687 V_state_change_timers_running = 0;
1688
1689 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1690
1691 /*
1692 * IGMPv1/v2/v3 host report and state-change timer processing.
1693 * Note: Processing a v3 group timer may remove a node.
1694 */
1695 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1696 ifp = igi->igi_ifp;
1697
1698 if (igi->igi_version == IGMP_VERSION_3) {
1699 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1700 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1701 PR_FASTHZ);
1702
1703 memset(&qrq, 0, sizeof(struct ifqueue));
1704 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS);
1705
1706 memset(&scq, 0, sizeof(struct ifqueue));
1707 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1708 }
1709
1710 IF_ADDR_RLOCK(ifp);
1711 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1712 if (ifma->ifma_addr->sa_family != AF_INET ||
1713 ifma->ifma_protospec == NULL)
1714 continue;
1715 inm = (struct in_multi *)ifma->ifma_protospec;
1716 switch (igi->igi_version) {
1717 case IGMP_VERSION_1:
1718 case IGMP_VERSION_2:
1719 igmp_v1v2_process_group_timer(inm,
1720 igi->igi_version);
1721 break;
1722 case IGMP_VERSION_3:
1723 igmp_v3_process_group_timers(igi, &qrq,
1724 &scq, inm, uri_fasthz);
1725 break;
1726 }
1727 }
1728 IF_ADDR_RUNLOCK(ifp);
1729
1730 if (igi->igi_version == IGMP_VERSION_3) {
1731 struct in_multi *tinm;
1732
1733 igmp_dispatch_queue(&qrq, 0, loop);
1734 igmp_dispatch_queue(&scq, 0, loop);
1735
1736 /*
1737 * Free the in_multi reference(s) for this
1738 * IGMP lifecycle.
1739 */
1740 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
1741 inm_nrele, tinm) {
1742 SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
1743 inm_nrele);
1744 inm_release_locked(inm);
1745 }
1746 }
1747 }
1748
1749 out_locked:
1750 IGMP_UNLOCK();
1751 IN_MULTI_UNLOCK();
1752 }
1753
1754 /*
1755 * Update host report group timer for IGMPv1/v2.
1756 * Will update the global pending timer flags.
1757 */
1758 static void
1759 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1760 {
1761 int report_timer_expired;
1762
1763 IN_MULTI_LOCK_ASSERT();
1764 IGMP_LOCK_ASSERT();
1765
1766 if (inm->inm_timer == 0) {
1767 report_timer_expired = 0;
1768 } else if (--inm->inm_timer == 0) {
1769 report_timer_expired = 1;
1770 } else {
1771 V_current_state_timers_running = 1;
1772 return;
1773 }
1774
1775 switch (inm->inm_state) {
1776 case IGMP_NOT_MEMBER:
1777 case IGMP_SILENT_MEMBER:
1778 case IGMP_IDLE_MEMBER:
1779 case IGMP_LAZY_MEMBER:
1780 case IGMP_SLEEPING_MEMBER:
1781 case IGMP_AWAKENING_MEMBER:
1782 break;
1783 case IGMP_REPORTING_MEMBER:
1784 if (report_timer_expired) {
1785 inm->inm_state = IGMP_IDLE_MEMBER;
1786 (void)igmp_v1v2_queue_report(inm,
1787 (version == IGMP_VERSION_2) ?
1788 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1789 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1790 }
1791 break;
1792 case IGMP_G_QUERY_PENDING_MEMBER:
1793 case IGMP_SG_QUERY_PENDING_MEMBER:
1794 case IGMP_LEAVING_MEMBER:
1795 break;
1796 }
1797 }
1798
1799 /*
1800 * Update a group's timers for IGMPv3.
1801 * Will update the global pending timer flags.
1802 * Note: Unlocked read from igi.
1803 */
1804 static void
1805 igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
1806 struct ifqueue *qrq, struct ifqueue *scq,
1807 struct in_multi *inm, const int uri_fasthz)
1808 {
1809 int query_response_timer_expired;
1810 int state_change_retransmit_timer_expired;
1811
1812 IN_MULTI_LOCK_ASSERT();
1813 IGMP_LOCK_ASSERT();
1814
1815 query_response_timer_expired = 0;
1816 state_change_retransmit_timer_expired = 0;
1817
1818 /*
1819 * During a transition from v1/v2 compatibility mode back to v3,
1820 * a group record in REPORTING state may still have its group
1821 * timer active. This is a no-op in this function; it is easier
1822 * to deal with it here than to complicate the slow-timeout path.
1823 */
1824 if (inm->inm_timer == 0) {
1825 query_response_timer_expired = 0;
1826 } else if (--inm->inm_timer == 0) {
1827 query_response_timer_expired = 1;
1828 } else {
1829 V_current_state_timers_running = 1;
1830 }
1831
1832 if (inm->inm_sctimer == 0) {
1833 state_change_retransmit_timer_expired = 0;
1834 } else if (--inm->inm_sctimer == 0) {
1835 state_change_retransmit_timer_expired = 1;
1836 } else {
1837 V_state_change_timers_running = 1;
1838 }
1839
1840 /* We are in fasttimo, so be quick about it. */
1841 if (!state_change_retransmit_timer_expired &&
1842 !query_response_timer_expired)
1843 return;
1844
1845 switch (inm->inm_state) {
1846 case IGMP_NOT_MEMBER:
1847 case IGMP_SILENT_MEMBER:
1848 case IGMP_SLEEPING_MEMBER:
1849 case IGMP_LAZY_MEMBER:
1850 case IGMP_AWAKENING_MEMBER:
1851 case IGMP_IDLE_MEMBER:
1852 break;
1853 case IGMP_G_QUERY_PENDING_MEMBER:
1854 case IGMP_SG_QUERY_PENDING_MEMBER:
1855 /*
1856 * Respond to a previously pending Group-Specific
1857 * or Group-and-Source-Specific query by enqueueing
1858 * the appropriate Current-State report for
1859 * immediate transmission.
1860 */
1861 if (query_response_timer_expired) {
1862 int retval;
1863
1864 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1865 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1866 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1867 __func__, retval);
1868 inm->inm_state = IGMP_REPORTING_MEMBER;
1869 /* XXX Clear recorded sources for next time. */
1870 inm_clear_recorded(inm);
1871 }
1872 /* FALLTHROUGH */
1873 case IGMP_REPORTING_MEMBER:
1874 case IGMP_LEAVING_MEMBER:
1875 if (state_change_retransmit_timer_expired) {
1876 /*
1877 * State-change retransmission timer fired.
1878 * If there are any further pending retransmissions,
1879 * set the global pending state-change flag, and
1880 * reset the timer.
1881 */
1882 if (--inm->inm_scrv > 0) {
1883 inm->inm_sctimer = uri_fasthz;
1884 V_state_change_timers_running = 1;
1885 }
1886 /*
1887 * Retransmit the previously computed state-change
1888 * report. If there are no further pending
1889 * retransmissions, the mbuf queue will be consumed.
1890 * Update T0 state to T1 as we have now sent
1891 * a state-change.
1892 */
1893 (void)igmp_v3_merge_state_changes(inm, scq);
1894
1895 inm_commit(inm);
1896 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
1897 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
1898
1899 /*
1900 * If we are leaving the group for good, make sure
1901 * we release IGMP's reference to it.
1902 * This release must be deferred using a SLIST,
1903 * as we are called from a loop which traverses
1904 * the in_ifmultiaddr TAILQ.
1905 */
1906 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1907 inm->inm_scrv == 0) {
1908 inm->inm_state = IGMP_NOT_MEMBER;
1909 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
1910 inm, inm_nrele);
1911 }
1912 }
1913 break;
1914 }
1915 }
1916
1917
1918 /*
1919 * Suppress a group's pending response to a group or source/group query.
1920 *
1921 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1922 * Do NOT update ST1/ST0 as this operation merely suppresses
1923 * the currently pending group record.
1924 * Do NOT suppress the response to a general query. It is possible but
1925 * it would require adding another state or flag.
1926 */
1927 static void
1928 igmp_v3_suppress_group_record(struct in_multi *inm)
1929 {
1930
1931 IN_MULTI_LOCK_ASSERT();
1932
1933 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1934 ("%s: not IGMPv3 mode on link", __func__));
1935
1936 	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1937 	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1938 return;
1939
1940 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1941 inm_clear_recorded(inm);
1942
1943 inm->inm_timer = 0;
1944 inm->inm_state = IGMP_REPORTING_MEMBER;
1945 }
1946
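/*
 * Note that the guard above admits only the two pending-response
 * states; for any other state (e.g. a group idling in
 * IGMP_REPORTING_MEMBER with no query outstanding) there is nothing
 * to suppress. Recorded sources are dropped only in the
 * group-and-source case, as a plain group query records none.
 */
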
1947 /*
1948 * Switch to a different IGMP version on the given interface,
1949 * as per Section 7.2.1.
1950 */
1951 static void
1952 igmp_set_version(struct igmp_ifinfo *igi, const int version)
1953 {
1954 int old_version_timer;
1955
1956 IGMP_LOCK_ASSERT();
1957
1958 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1959 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1960
1961 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1962 /*
1963 * Compute the "Older Version Querier Present" timer as per
1964 * Section 8.12.
1965 */
1966 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1967 old_version_timer *= PR_SLOWHZ;
1968
1969 if (version == IGMP_VERSION_1) {
1970 igi->igi_v1_timer = old_version_timer;
1971 igi->igi_v2_timer = 0;
1972 } else if (version == IGMP_VERSION_2) {
1973 igi->igi_v1_timer = 0;
1974 igi->igi_v2_timer = old_version_timer;
1975 }
1976 }
1977
1978 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1979 if (igi->igi_version != IGMP_VERSION_2) {
1980 igi->igi_version = IGMP_VERSION_2;
1981 igmp_v3_cancel_link_timers(igi);
1982 }
1983 } else if (igi->igi_v1_timer > 0) {
1984 if (igi->igi_version != IGMP_VERSION_1) {
1985 igi->igi_version = IGMP_VERSION_1;
1986 igmp_v3_cancel_link_timers(igi);
1987 }
1988 }
1989 }
1990
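/*
 * Worked example of the Section 8.12 computation above, assuming the
 * RFC 3376 defaults carried in the igmp_ifinfo (igi_rv = 2,
 * igi_qi = 125s, igi_qri = 10s) and PR_SLOWHZ = 2 slow ticks/second:
 *
 *	old_version_timer = 2 * 125 + 10 = 260 seconds
 *	                  = 260 * 2     = 520 slow-timeout ticks
 *
 * i.e. the link lingers in compatibility mode for ~4.3 minutes after
 * the last older-version query was heard.
 */
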
1991 /*
1992 * Cancel pending IGMPv3 timers for the given link and all groups
1993 * joined on it; state-change, general-query, and group-query timers.
1994 *
1995 * Only ever called on a transition from v3 to Compatibility mode. Kill
1996  * the timers stone dead (this may be expensive for large numbers of
1997  * groups); they will be restarted if Compatibility Mode deems them
1998  * necessary as a result of query processing.
1999 */
2000 static void
2001 igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
2002 {
2003 struct ifmultiaddr *ifma;
2004 struct ifnet *ifp;
2005 struct in_multi *inm, *tinm;
2006
2007 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2008 igi->igi_ifp, igi->igi_ifp->if_xname);
2009
2010 IN_MULTI_LOCK_ASSERT();
2011 IGMP_LOCK_ASSERT();
2012
2013 /*
2014 * Stop the v3 General Query Response on this link stone dead.
2015 * If fasttimo is woken up due to V_interface_timers_running,
2016 * the flag will be cleared if there are no pending link timers.
2017 */
2018 igi->igi_v3_timer = 0;
2019
2020 /*
2021 * Now clear the current-state and state-change report timers
2022 * for all memberships scoped to this link.
2023 */
2024 ifp = igi->igi_ifp;
2025 IF_ADDR_RLOCK(ifp);
2026 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2027 if (ifma->ifma_addr->sa_family != AF_INET ||
2028 ifma->ifma_protospec == NULL)
2029 continue;
2030 inm = (struct in_multi *)ifma->ifma_protospec;
2031 switch (inm->inm_state) {
2032 case IGMP_NOT_MEMBER:
2033 case IGMP_SILENT_MEMBER:
2034 case IGMP_IDLE_MEMBER:
2035 case IGMP_LAZY_MEMBER:
2036 case IGMP_SLEEPING_MEMBER:
2037 case IGMP_AWAKENING_MEMBER:
2038 /*
2039 * These states are either not relevant in v3 mode,
2040 * or are unreported. Do nothing.
2041 */
2042 break;
2043 case IGMP_LEAVING_MEMBER:
2044 /*
2045 * If we are leaving the group and switching to
2046 * compatibility mode, we need to release the final
2047 * reference held for issuing the INCLUDE {}, and
2048 * transition to REPORTING to ensure the host leave
2049 * message is sent upstream to the old querier --
2050 * transition to NOT would lose the leave and race.
2051 */
2052 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2053 /* FALLTHROUGH */
2054 case IGMP_G_QUERY_PENDING_MEMBER:
2055 case IGMP_SG_QUERY_PENDING_MEMBER:
2056 inm_clear_recorded(inm);
2057 /* FALLTHROUGH */
2058 case IGMP_REPORTING_MEMBER:
2059 inm->inm_state = IGMP_REPORTING_MEMBER;
2060 break;
2061 }
2062 /*
2063 * Always clear state-change and group report timers.
2064 * Free any pending IGMPv3 state-change records.
2065 */
2066 inm->inm_sctimer = 0;
2067 inm->inm_timer = 0;
2068 _IF_DRAIN(&inm->inm_scq);
2069 }
2070 IF_ADDR_RUNLOCK(ifp);
2071 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
2072 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
2073 inm_release_locked(inm);
2074 }
2075 }
2076
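/*
 * On the deferred release above: inm_release_locked() may free the
 * in_multi and unlink its ifmultiaddr, which would invalidate the
 * TAILQ_FOREACH cursor while the address list is locked. Hence the
 * two-phase idiom, sketched schematically:
 *
 *	TAILQ_FOREACH(ifma, ...)                      // traversal locked
 *		SLIST_INSERT_HEAD(&relhead, inm, ...);  // defer only
 *	IF_ADDR_RUNLOCK(ifp);                         // drop the lock
 *	SLIST_FOREACH_SAFE(inm, &relhead, ...)        // now safe to free
 *		inm_release_locked(inm);
 */
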
2077 /*
2078 * Update the Older Version Querier Present timers for a link.
2079 * See Section 7.2.1 of RFC 3376.
2080 */
2081 static void
2082 igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi)
2083 {
2084
2085 IGMP_LOCK_ASSERT();
2086
2087 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2088 /*
2089 * IGMPv1 and IGMPv2 Querier Present timers expired.
2090 *
2091 * Revert to IGMPv3.
2092 */
2093 if (igi->igi_version != IGMP_VERSION_3) {
2094 CTR5(KTR_IGMPV3,
2095 "%s: transition from v%d -> v%d on %p(%s)",
2096 __func__, igi->igi_version, IGMP_VERSION_3,
2097 igi->igi_ifp, igi->igi_ifp->if_xname);
2098 igi->igi_version = IGMP_VERSION_3;
2099 }
2100 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2101 /*
2102 * IGMPv1 Querier Present timer expired,
2103 * IGMPv2 Querier Present timer running.
2104 * If IGMPv2 was disabled since last timeout,
2105 * revert to IGMPv3.
2106 * If IGMPv2 is enabled, revert to IGMPv2.
2107 */
2108 if (!V_igmp_v2enable) {
2109 CTR5(KTR_IGMPV3,
2110 "%s: transition from v%d -> v%d on %p(%s)",
2111 __func__, igi->igi_version, IGMP_VERSION_3,
2112 igi->igi_ifp, igi->igi_ifp->if_xname);
2113 igi->igi_v2_timer = 0;
2114 igi->igi_version = IGMP_VERSION_3;
2115 } else {
2116 --igi->igi_v2_timer;
2117 if (igi->igi_version != IGMP_VERSION_2) {
2118 CTR5(KTR_IGMPV3,
2119 "%s: transition from v%d -> v%d on %p(%s)",
2120 __func__, igi->igi_version, IGMP_VERSION_2,
2121 igi->igi_ifp, igi->igi_ifp->if_xname);
2122 igi->igi_version = IGMP_VERSION_2;
2123 }
2124 }
2125 } else if (igi->igi_v1_timer > 0) {
2126 /*
2127 * IGMPv1 Querier Present timer running.
2128 * Stop IGMPv2 timer if running.
2129 *
2130 * If IGMPv1 was disabled since last timeout,
2131 * revert to IGMPv3.
2132 		 * If IGMPv1 is enabled, just decrement its timer.
2133 */
2134 if (!V_igmp_v1enable) {
2135 CTR5(KTR_IGMPV3,
2136 "%s: transition from v%d -> v%d on %p(%s)",
2137 __func__, igi->igi_version, IGMP_VERSION_3,
2138 igi->igi_ifp, igi->igi_ifp->if_xname);
2139 igi->igi_v1_timer = 0;
2140 igi->igi_version = IGMP_VERSION_3;
2141 } else {
2142 --igi->igi_v1_timer;
2143 }
2144 if (igi->igi_v2_timer > 0) {
2145 CTR3(KTR_IGMPV3,
2146 "%s: cancel v2 timer on %p(%s)",
2147 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2148 igi->igi_v2_timer = 0;
2149 }
2150 }
2151 }
2152
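/*
 * The decision above, tabulated (v1/v2 are the respective Querier
 * Present timers; the gates are the V_igmp_v1enable/v2enable sysctls):
 *
 *	v1 timer  v2 timer  gate          resulting link version
 *	0         0         -             IGMPv3
 *	0         >0        v2 disabled   IGMPv3 (v2 timer cleared)
 *	0         >0        v2 enabled    IGMPv2 (v2 timer ticks down)
 *	>0        any       v1 disabled   IGMPv3 (v1 timer cleared)
 *	>0        any       v1 enabled    unchanged (v1); v2 timer cleared
 */
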
2153 /*
2154 * Global slowtimo handler.
2155 * VIMAGE: Timeout handlers are expected to service all vimages.
2156 */
2157 void
2158 igmp_slowtimo(void)
2159 {
2160 VNET_ITERATOR_DECL(vnet_iter);
2161
2162 VNET_LIST_RLOCK_NOSLEEP();
2163 VNET_FOREACH(vnet_iter) {
2164 CURVNET_SET(vnet_iter);
2165 igmp_slowtimo_vnet();
2166 CURVNET_RESTORE();
2167 }
2168 VNET_LIST_RUNLOCK_NOSLEEP();
2169 }
2170
2171 /*
2172 * Per-vnet slowtimo handler.
2173 */
2174 static void
2175 igmp_slowtimo_vnet(void)
2176 {
2177 struct igmp_ifinfo *igi;
2178
2179 IGMP_LOCK();
2180
2181 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2182 igmp_v1v2_process_querier_timers(igi);
2183 }
2184
2185 IGMP_UNLOCK();
2186 }
2187
2188 /*
2189 * Dispatch an IGMPv1/v2 host report or leave message.
2190 * These are always small enough to fit inside a single mbuf.
2191 */
2192 static int
2193 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2194 {
2195 struct ifnet *ifp;
2196 struct igmp *igmp;
2197 struct ip *ip;
2198 struct mbuf *m;
2199
2200 IN_MULTI_LOCK_ASSERT();
2201 IGMP_LOCK_ASSERT();
2202
2203 ifp = inm->inm_ifp;
2204
2205 MGETHDR(m, M_DONTWAIT, MT_DATA);
2206 if (m == NULL)
2207 return (ENOMEM);
2208 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2209
2210 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2211
2212 m->m_data += sizeof(struct ip);
2213 m->m_len = sizeof(struct igmp);
2214
2215 igmp = mtod(m, struct igmp *);
2216 igmp->igmp_type = type;
2217 igmp->igmp_code = 0;
2218 igmp->igmp_group = inm->inm_addr;
2219 igmp->igmp_cksum = 0;
2220 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2221
2222 m->m_data -= sizeof(struct ip);
2223 m->m_len += sizeof(struct ip);
2224
2225 ip = mtod(m, struct ip *);
2226 ip->ip_tos = 0;
2227 ip->ip_len = sizeof(struct ip) + sizeof(struct igmp);
2228 ip->ip_off = 0;
2229 ip->ip_p = IPPROTO_IGMP;
2230 ip->ip_src.s_addr = INADDR_ANY;
2231
2232 if (type == IGMP_HOST_LEAVE_MESSAGE)
2233 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2234 else
2235 ip->ip_dst = inm->inm_addr;
2236
2237 igmp_save_context(m, ifp);
2238
2239 m->m_flags |= M_IGMPV2;
2240 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2241 m->m_flags |= M_IGMP_LOOP;
2242
2243 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2244 netisr_dispatch(NETISR_IGMP, m);
2245
2246 return (0);
2247 }
2248
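/*
 * The datagram built above is fixed-size: a 20-byte IP header plus
 * the 8-byte IGMPv1/v2 message (type, code, checksum, group), 28
 * bytes in all, which is why a single mbuf always suffices. As an
 * illustration, a v2 Leave for the hypothetical group 239.1.2.3
 * leaves the host as:
 *
 *	ip_src = INADDR_ANY (filled in by ip_output)
 *	ip_dst = 224.0.0.2 (ALL-ROUTERS),  ip_p = IPPROTO_IGMP
 *	igmp_type = 0x17 (LEAVE), igmp_code = 0, igmp_group = 239.1.2.3
 */
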
2249 /*
2250 * Process a state change from the upper layer for the given IPv4 group.
2251 *
2252 * Each socket holds a reference on the in_multi in its own ip_moptions.
2253  * The socket layer will have made the necessary updates to the group
2254  * state; it is now up to IGMP to issue a state-change report if there
2255 * has been any change between T0 (when the last state-change was issued)
2256 * and T1 (now).
2257 *
2258 * We use the IGMPv3 state machine at group level. The IGMP module
2259 * however makes the decision as to which IGMP protocol version to speak.
2260 * A state change *from* INCLUDE {} always means an initial join.
2261 * A state change *to* INCLUDE {} always means a final leave.
2262 *
2263 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2264 * save ourselves a bunch of work; any exclusive mode groups need not
2265 * compute source filter lists.
2266 *
2267 * VIMAGE: curvnet should have been set by caller, as this routine
2268 * is called from the socket option handlers.
2269 */
2270 int
2271 igmp_change_state(struct in_multi *inm)
2272 {
2273 struct igmp_ifinfo *igi;
2274 struct ifnet *ifp;
2275 int error;
2276
2277 IN_MULTI_LOCK_ASSERT();
2278
2279 error = 0;
2280
2281 /*
2282 * Try to detect if the upper layer just asked us to change state
2283 * for an interface which has now gone away.
2284 */
2285 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2286 ifp = inm->inm_ifma->ifma_ifp;
2287 if (ifp != NULL) {
2288 /*
2289 * Sanity check that netinet's notion of ifp is the
2290 * same as net's.
2291 */
2292 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2293 }
2294
2295 IGMP_LOCK();
2296
2297 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2298 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
2299
2300 /*
2301 * If we detect a state transition to or from MCAST_UNDEFINED
2302 * for this group, then we are starting or finishing an IGMP
2303 * life cycle for this group.
2304 */
2305 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2306 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2307 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2308 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2309 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2310 error = igmp_initial_join(inm, igi);
2311 goto out_locked;
2312 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2313 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2314 igmp_final_leave(inm, igi);
2315 goto out_locked;
2316 }
2317 } else {
2318 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2319 }
2320
2321 error = igmp_handle_state_change(inm, igi);
2322
2323 out_locked:
2324 IGMP_UNLOCK();
2325 return (error);
2326 }
2327
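/*
 * The T0/T1 comparison above dispatches as follows:
 *
 *	T0 filter mode   T1 filter mode   action
 *	UNDEFINED        INCLUDE/EXCLUDE  igmp_initial_join()
 *	INCLUDE/EXCLUDE  UNDEFINED        igmp_final_leave()
 *	INCLUDE/EXCLUDE  EXCLUDE/INCLUDE  igmp_handle_state_change()
 *	unchanged        unchanged        igmp_handle_state_change()
 *	                                  (source filter delta only)
 */
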
2328 /*
2329 * Perform the initial join for an IGMP group.
2330 *
2331 * When joining a group:
2332 * If the group should have its IGMP traffic suppressed, do nothing.
2333 * IGMPv1 starts sending IGMPv1 host membership reports.
2334 * IGMPv2 starts sending IGMPv2 host membership reports.
2335 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2336 * initial state of the membership.
2337 */
2338 static int
2339 igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
2340 {
2341 struct ifnet *ifp;
2342 struct ifqueue *ifq;
2343 int error, retval, syncstates;
2344
2345 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
2346 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2347 inm->inm_ifp->if_xname);
2348
2349 error = 0;
2350 syncstates = 1;
2351
2352 ifp = inm->inm_ifp;
2353
2354 IN_MULTI_LOCK_ASSERT();
2355 IGMP_LOCK_ASSERT();
2356
2357 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2358
2359 /*
2360 * Groups joined on loopback or marked as 'not reported',
2361 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2362 * are never reported in any IGMP protocol exchanges.
2363 * All other groups enter the appropriate IGMP state machine
2364 * for the version in use on this link.
2365 * A link marked as IGIF_SILENT causes IGMP to be completely
2366 * disabled for the link.
2367 */
2368 if ((ifp->if_flags & IFF_LOOPBACK) ||
2369 (igi->igi_flags & IGIF_SILENT) ||
2370 !igmp_isgroupreported(inm->inm_addr)) {
2371 CTR1(KTR_IGMPV3,
2372 "%s: not kicking state machine for silent group", __func__);
2373 inm->inm_state = IGMP_SILENT_MEMBER;
2374 inm->inm_timer = 0;
2375 } else {
2376 /*
2377 * Deal with overlapping in_multi lifecycle.
2378 * If this group was LEAVING, then make sure
2379 * we drop the reference we picked up to keep the
2380 * group around for the final INCLUDE {} enqueue.
2381 */
2382 if (igi->igi_version == IGMP_VERSION_3 &&
2383 inm->inm_state == IGMP_LEAVING_MEMBER)
2384 inm_release_locked(inm);
2385
2386 inm->inm_state = IGMP_REPORTING_MEMBER;
2387
2388 switch (igi->igi_version) {
2389 case IGMP_VERSION_1:
2390 case IGMP_VERSION_2:
2391 inm->inm_state = IGMP_IDLE_MEMBER;
2392 error = igmp_v1v2_queue_report(inm,
2393 (igi->igi_version == IGMP_VERSION_2) ?
2394 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2395 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2396 if (error == 0) {
2397 inm->inm_timer = IGMP_RANDOM_DELAY(
2398 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2399 V_current_state_timers_running = 1;
2400 }
2401 break;
2402
2403 case IGMP_VERSION_3:
2404 /*
2405 * Defer update of T0 to T1, until the first copy
2406 * of the state change has been transmitted.
2407 */
2408 syncstates = 0;
2409
2410 /*
2411 * Immediately enqueue a State-Change Report for
2412 * this interface, freeing any previous reports.
2413 * Don't kick the timers if there is nothing to do,
2414 * or if an error occurred.
2415 */
2416 ifq = &inm->inm_scq;
2417 _IF_DRAIN(ifq);
2418 retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
2419 0, 0);
2420 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2421 __func__, retval);
2422 if (retval <= 0) {
2423 error = retval * -1;
2424 break;
2425 }
2426
2427 /*
2428 * Schedule transmission of pending state-change
2429 * report up to RV times for this link. The timer
2430 * will fire at the next igmp_fasttimo (~200ms),
2431 * giving us an opportunity to merge the reports.
2432 */
2433 if (igi->igi_flags & IGIF_LOOPBACK) {
2434 inm->inm_scrv = 1;
2435 } else {
2436 KASSERT(igi->igi_rv > 1,
2437 ("%s: invalid robustness %d", __func__,
2438 igi->igi_rv));
2439 inm->inm_scrv = igi->igi_rv;
2440 }
2441 inm->inm_sctimer = 1;
2442 V_state_change_timers_running = 1;
2443
2444 error = 0;
2445 break;
2446 }
2447 }
2448
2449 /*
2450 * Only update the T0 state if state change is atomic,
2451 * i.e. we don't need to wait for a timer to fire before we
2452 * can consider the state change to have been communicated.
2453 */
2454 if (syncstates) {
2455 inm_commit(inm);
2456 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2457 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2458 }
2459
2460 return (error);
2461 }
2462
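/*
 * For the v1/v2 branch above, IGMP_RANDOM_DELAY() picks a uniformly
 * random timer value in [1, X] ticks. Assuming the stock values
 * IGMP_V1V2_MAX_RI = 10 seconds and PR_FASTHZ = 5 fast ticks/second,
 * inm_timer lands in [1, 50] fast-timeout ticks, so the unsolicited
 * report fires within the first ~10 seconds, spreading the load when
 * many hosts join at once.
 */
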
2463 /*
2464 * Issue an intermediate state change during the IGMP life-cycle.
2465 */
2466 static int
2467 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
2468 {
2469 struct ifnet *ifp;
2470 int retval;
2471
2472 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)",
2473 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2474 inm->inm_ifp->if_xname);
2475
2476 ifp = inm->inm_ifp;
2477
2478 IN_MULTI_LOCK_ASSERT();
2479 IGMP_LOCK_ASSERT();
2480
2481 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2482
2483 if ((ifp->if_flags & IFF_LOOPBACK) ||
2484 (igi->igi_flags & IGIF_SILENT) ||
2485 !igmp_isgroupreported(inm->inm_addr) ||
2486 (igi->igi_version != IGMP_VERSION_3)) {
2487 if (!igmp_isgroupreported(inm->inm_addr)) {
2488 CTR1(KTR_IGMPV3,
2489 "%s: not kicking state machine for silent group", __func__);
2490 }
2491 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2492 inm_commit(inm);
2493 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2494 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2495 return (0);
2496 }
2497
2498 _IF_DRAIN(&inm->inm_scq);
2499
2500 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2501 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2502 if (retval <= 0)
2503 return (-retval);
2504
2505 /*
2506 * If record(s) were enqueued, start the state-change
2507 * report timer for this group.
2508 */
2509 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2510 inm->inm_sctimer = 1;
2511 V_state_change_timers_running = 1;
2512
2513 return (0);
2514 }
2515
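/*
 * On the sign flip above: igmp_v3_enqueue_group_record() returns the
 * number of bytes appended on success, zero if there was nothing to
 * report, or a negated errno on failure; callers therefore negate a
 * non-positive return to recover a conventional error code.
 */
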
2516 /*
2517 * Perform the final leave for an IGMP group.
2518 *
2519 * When leaving a group:
2520 * IGMPv1 does nothing.
2521 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2522 * IGMPv3 enqueues a state-change report containing a transition
2523 * to INCLUDE {} for immediate transmission.
2524 */
2525 static void
2526 igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
2527 {
2528 int syncstates;
2529
2530 syncstates = 1;
2531
2532 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)",
2533 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2534 inm->inm_ifp->if_xname);
2535
2536 IN_MULTI_LOCK_ASSERT();
2537 IGMP_LOCK_ASSERT();
2538
2539 switch (inm->inm_state) {
2540 case IGMP_NOT_MEMBER:
2541 case IGMP_SILENT_MEMBER:
2542 case IGMP_LEAVING_MEMBER:
2543 /* Already leaving or left; do nothing. */
2544 CTR1(KTR_IGMPV3,
2545 "%s: not kicking state machine for silent group", __func__);
2546 break;
2547 case IGMP_REPORTING_MEMBER:
2548 case IGMP_IDLE_MEMBER:
2549 case IGMP_G_QUERY_PENDING_MEMBER:
2550 case IGMP_SG_QUERY_PENDING_MEMBER:
2551 if (igi->igi_version == IGMP_VERSION_2) {
2552 #ifdef INVARIANTS
2553 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2554 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2555 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2556 __func__);
2557 #endif
2558 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2559 inm->inm_state = IGMP_NOT_MEMBER;
2560 } else if (igi->igi_version == IGMP_VERSION_3) {
2561 /*
2562 * Stop group timer and all pending reports.
2563 * Immediately enqueue a state-change report
2564 * TO_IN {} to be sent on the next fast timeout,
2565 * giving us an opportunity to merge reports.
2566 */
2567 _IF_DRAIN(&inm->inm_scq);
2568 inm->inm_timer = 0;
2569 if (igi->igi_flags & IGIF_LOOPBACK) {
2570 inm->inm_scrv = 1;
2571 } else {
2572 inm->inm_scrv = igi->igi_rv;
2573 }
2574 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d "
2575 "pending retransmissions.", __func__,
2576 inet_ntoa(inm->inm_addr),
2577 inm->inm_ifp->if_xname, inm->inm_scrv);
2578 if (inm->inm_scrv == 0) {
2579 inm->inm_state = IGMP_NOT_MEMBER;
2580 inm->inm_sctimer = 0;
2581 } else {
2582 int retval;
2583
2584 inm_acquire_locked(inm);
2585
2586 retval = igmp_v3_enqueue_group_record(
2587 &inm->inm_scq, inm, 1, 0, 0);
2588 KASSERT(retval != 0,
2589 ("%s: enqueue record = %d", __func__,
2590 retval));
2591
2592 inm->inm_state = IGMP_LEAVING_MEMBER;
2593 inm->inm_sctimer = 1;
2594 V_state_change_timers_running = 1;
2595 syncstates = 0;
2596 }
2597 break;
2598 }
2599 break;
2600 case IGMP_LAZY_MEMBER:
2601 case IGMP_SLEEPING_MEMBER:
2602 case IGMP_AWAKENING_MEMBER:
2603 /* Our reports are suppressed; do nothing. */
2604 break;
2605 }
2606
2607 if (syncstates) {
2608 inm_commit(inm);
2609 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2610 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2611 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2612 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s",
2613 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2614 }
2615 }
2616
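/*
 * Note the reference juggling in the v3 leave path above: the group
 * is inm_acquire_locked()'d before the TO_IN {} record is enqueued,
 * and the matching release is deferred to the per-group fast-timeout
 * processing once the inm_scrv retransmissions have drained (or to
 * igmp_initial_join() if the application rejoins first), so the
 * in_multi cannot be freed while a state-change report still
 * references it.
 */
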
2617 /*
2618 * Enqueue an IGMPv3 group record to the given output queue.
2619 *
2620 * XXX This function could do with having the allocation code
2621 * split out, and the multiple-tree-walks coalesced into a single
2622 * routine as has been done in igmp_v3_enqueue_filter_change().
2623 *
2624 * If is_state_change is zero, a current-state record is appended.
2625 * If is_state_change is non-zero, a state-change report is appended.
2626 *
2627 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2628 * If is_group_query is zero, and if there is a packet with free space
2629  * at the tail of the queue, the record will be appended to it,
2630  * provided there is enough free space.
2631 * Otherwise a new mbuf packet chain is allocated.
2632 *
2633 * If is_source_query is non-zero, each source is checked to see if
2634 * it was recorded for a Group-Source query, and will be omitted if
2635 * it is not both in-mode and recorded.
2636 *
2637 * The function will attempt to allocate leading space in the packet
2638 * for the IP/IGMP header to be prepended without fragmenting the chain.
2639 *
2640 * If successful the size of all data appended to the queue is returned,
2641 * otherwise an error code less than zero is returned, or zero if
2642 * no record(s) were appended.
2643 */
2644 static int
2645 igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
2646 const int is_state_change, const int is_group_query,
2647 const int is_source_query)
2648 {
2649 struct igmp_grouprec ig;
2650 struct igmp_grouprec *pig;
2651 struct ifnet *ifp;
2652 struct ip_msource *ims, *nims;
2653 struct mbuf *m0, *m, *md;
2654 int error, is_filter_list_change;
2655 int minrec0len, m0srcs, msrcs, nbytes, off;
2656 int record_has_sources;
2657 int now;
2658 int type;
2659 in_addr_t naddr;
2660 uint8_t mode;
2661
2662 IN_MULTI_LOCK_ASSERT();
2663
2664 error = 0;
2665 ifp = inm->inm_ifp;
2666 is_filter_list_change = 0;
2667 m = NULL;
2668 m0 = NULL;
2669 m0srcs = 0;
2670 msrcs = 0;
2671 nbytes = 0;
2672 nims = NULL;
2673 record_has_sources = 1;
2674 pig = NULL;
2675 type = IGMP_DO_NOTHING;
2676 mode = inm->inm_st[1].iss_fmode;
2677
2678 /*
2679 * If we did not transition out of ASM mode during t0->t1,
2680 * and there are no source nodes to process, we can skip
2681 * the generation of source records.
2682 */
2683 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2684 inm->inm_nsrc == 0)
2685 record_has_sources = 0;
2686
2687 if (is_state_change) {
2688 /*
2689 * Queue a state change record.
2690 * If the mode did not change, and there are non-ASM
2691 * listeners or source filters present,
2692 * we potentially need to issue two records for the group.
2693 * If we are transitioning to MCAST_UNDEFINED, we need
2694 * not send any sources.
2695 * If there are ASM listeners, and there was no filter
2696 * mode transition of any kind, do nothing.
2697 */
2698 if (mode != inm->inm_st[0].iss_fmode) {
2699 if (mode == MCAST_EXCLUDE) {
2700 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2701 __func__);
2702 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2703 } else {
2704 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2705 __func__);
2706 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2707 if (mode == MCAST_UNDEFINED)
2708 record_has_sources = 0;
2709 }
2710 } else {
2711 if (record_has_sources) {
2712 is_filter_list_change = 1;
2713 } else {
2714 type = IGMP_DO_NOTHING;
2715 }
2716 }
2717 } else {
2718 /*
2719 * Queue a current state record.
2720 */
2721 if (mode == MCAST_EXCLUDE) {
2722 type = IGMP_MODE_IS_EXCLUDE;
2723 } else if (mode == MCAST_INCLUDE) {
2724 type = IGMP_MODE_IS_INCLUDE;
2725 KASSERT(inm->inm_st[1].iss_asm == 0,
2726 ("%s: inm %p is INCLUDE but ASM count is %d",
2727 __func__, inm, inm->inm_st[1].iss_asm));
2728 }
2729 }
2730
2731 /*
2732 * Generate the filter list changes using a separate function.
2733 */
2734 if (is_filter_list_change)
2735 return (igmp_v3_enqueue_filter_change(ifq, inm));
2736
2737 if (type == IGMP_DO_NOTHING) {
2738 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
2739 __func__, inet_ntoa(inm->inm_addr),
2740 inm->inm_ifp->if_xname);
2741 return (0);
2742 }
2743
2744 /*
2745 * If any sources are present, we must be able to fit at least
2746 * one in the trailing space of the tail packet's mbuf,
2747 * ideally more.
2748 */
2749 minrec0len = sizeof(struct igmp_grouprec);
2750 if (record_has_sources)
2751 minrec0len += sizeof(in_addr_t);
2752
2753 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__,
2754 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
2755 inm->inm_ifp->if_xname);
2756
2757 /*
2758 * Check if we have a packet in the tail of the queue for this
2759 * group into which the first group record for this group will fit.
2760 * Otherwise allocate a new packet.
2761 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2762 * Note: Group records for G/GSR query responses MUST be sent
2763 * in their own packet.
2764 */
2765 m0 = ifq->ifq_tail;
2766 if (!is_group_query &&
2767 m0 != NULL &&
2768 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2769 (m0->m_pkthdr.len + minrec0len) <
2770 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2771 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2772 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2773 m = m0;
2774 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2775 } else {
2776 if (_IF_QFULL(ifq)) {
2777 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2778 return (-ENOMEM);
2779 }
2780 m = NULL;
2781 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2782 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2783 if (!is_state_change && !is_group_query) {
2784 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2785 if (m)
2786 m->m_data += IGMP_LEADINGSPACE;
2787 }
2788 if (m == NULL) {
2789 m = m_gethdr(M_DONTWAIT, MT_DATA);
2790 if (m)
2791 MH_ALIGN(m, IGMP_LEADINGSPACE);
2792 }
2793 if (m == NULL)
2794 return (-ENOMEM);
2795
2796 igmp_save_context(m, ifp);
2797
2798 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2799 }
2800
2801 /*
2802 * Append group record.
2803 * If we have sources, we don't know how many yet.
2804 */
2805 ig.ig_type = type;
2806 ig.ig_datalen = 0;
2807 ig.ig_numsrc = 0;
2808 ig.ig_group = inm->inm_addr;
2809 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2810 if (m != m0)
2811 m_freem(m);
2812 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2813 return (-ENOMEM);
2814 }
2815 nbytes += sizeof(struct igmp_grouprec);
2816
2817 /*
2818 * Append as many sources as will fit in the first packet.
2819 * If we are appending to a new packet, the chain allocation
2820 * may potentially use clusters; use m_getptr() in this case.
2821 * If we are appending to an existing packet, we need to obtain
2822 * a pointer to the group record after m_append(), in case a new
2823 * mbuf was allocated.
2824 * Only append sources which are in-mode at t1. If we are
2825 * transitioning to MCAST_UNDEFINED state on the group, do not
2826 * include source entries.
2827 * Only report recorded sources in our filter set when responding
2828 * to a group-source query.
2829 */
2830 if (record_has_sources) {
2831 if (m == m0) {
2832 md = m_last(m);
2833 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2834 md->m_len - nbytes);
2835 } else {
2836 md = m_getptr(m, 0, &off);
2837 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2838 off);
2839 }
2840 msrcs = 0;
2841 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2842 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2843 inet_ntoa_haddr(ims->ims_haddr));
2844 now = ims_get_mode(inm, ims, 1);
2845 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2846 if ((now != mode) ||
2847 (now == mode && mode == MCAST_UNDEFINED)) {
2848 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2849 continue;
2850 }
2851 if (is_source_query && ims->ims_stp == 0) {
2852 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2853 __func__);
2854 continue;
2855 }
2856 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2857 naddr = htonl(ims->ims_haddr);
2858 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2859 if (m != m0)
2860 m_freem(m);
2861 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2862 __func__);
2863 return (-ENOMEM);
2864 }
2865 nbytes += sizeof(in_addr_t);
2866 ++msrcs;
2867 if (msrcs == m0srcs)
2868 break;
2869 }
2870 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2871 msrcs);
2872 pig->ig_numsrc = htons(msrcs);
2873 nbytes += (msrcs * sizeof(in_addr_t));
2874 }
2875
2876 if (is_source_query && msrcs == 0) {
2877 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2878 if (m != m0)
2879 m_freem(m);
2880 return (0);
2881 }
2882
2883 /*
2884 * We are good to go with first packet.
2885 */
2886 if (m != m0) {
2887 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2888 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2889 _IF_ENQUEUE(ifq, m);
2890 } else
2891 m->m_pkthdr.PH_vt.vt_nrecs++;
2892
2893 /*
2894 * No further work needed if no source list in packet(s).
2895 */
2896 if (!record_has_sources)
2897 return (nbytes);
2898
2899 /*
2900 * Whilst sources remain to be announced, we need to allocate
2901 * a new packet and fill out as many sources as will fit.
2902 * Always try for a cluster first.
2903 */
2904 while (nims != NULL) {
2905 if (_IF_QFULL(ifq)) {
2906 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2907 return (-ENOMEM);
2908 }
2909 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2910 if (m)
2911 m->m_data += IGMP_LEADINGSPACE;
2912 if (m == NULL) {
2913 m = m_gethdr(M_DONTWAIT, MT_DATA);
2914 if (m)
2915 MH_ALIGN(m, IGMP_LEADINGSPACE);
2916 }
2917 if (m == NULL)
2918 return (-ENOMEM);
2919 igmp_save_context(m, ifp);
2920 md = m_getptr(m, 0, &off);
2921 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2922 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2923
2924 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2925 if (m != m0)
2926 m_freem(m);
2927 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2928 return (-ENOMEM);
2929 }
2930 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2931 nbytes += sizeof(struct igmp_grouprec);
2932
2933 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2934 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2935
2936 msrcs = 0;
2937 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2938 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2939 inet_ntoa_haddr(ims->ims_haddr));
2940 now = ims_get_mode(inm, ims, 1);
2941 if ((now != mode) ||
2942 (now == mode && mode == MCAST_UNDEFINED)) {
2943 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2944 continue;
2945 }
2946 if (is_source_query && ims->ims_stp == 0) {
2947 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2948 __func__);
2949 continue;
2950 }
2951 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2952 naddr = htonl(ims->ims_haddr);
2953 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2954 if (m != m0)
2955 m_freem(m);
2956 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2957 __func__);
2958 return (-ENOMEM);
2959 }
2960 ++msrcs;
2961 if (msrcs == m0srcs)
2962 break;
2963 }
2964 pig->ig_numsrc = htons(msrcs);
2965 nbytes += (msrcs * sizeof(in_addr_t));
2966
2967 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2968 _IF_ENQUEUE(ifq, m);
2969 }
2970
2971 return (nbytes);
2972 }
2973
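/*
 * Worked example of the source packing above: on a 1500-byte MTU
 * link, with IGMP_LEADINGSPACE assumed to cover the IP header, the
 * Router Alert option and the v3 report header (20 + 4 + 8 = 32
 * bytes), a freshly allocated packet holds
 *
 *	m0srcs = (1500 - 32 - 8) / 4 = 365
 *
 * source addresses after the 8-byte group record header; any further
 * sources spill into follow-up packets via the while loop above,
 * which recomputes m0srcs for a full-sized packet each time.
 */
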
2974 /*
2975 * Type used to mark record pass completion.
2976 * We exploit the fact we can cast to this easily from the
2977 * current filter modes on each ip_msource node.
2978 */
2979 typedef enum {
2980 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2981 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2982 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2983 REC_FULL = REC_ALLOW | REC_BLOCK
2984 } rectype_t;
2985
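/*
 * The cast mentioned above relies on the numeric values lining up
 * with the MCAST_* filter modes from <netinet/in.h> (MCAST_UNDEFINED
 * == 0, MCAST_INCLUDE == 1, MCAST_EXCLUDE == 2), so a node's mode at
 * t1 converts directly:
 *
 *	(rectype_t)MCAST_INCLUDE == REC_ALLOW
 *	(rectype_t)MCAST_EXCLUDE == REC_BLOCK
 */
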
2986 /*
2987 * Enqueue an IGMPv3 filter list change to the given output queue.
2988 *
2989 * Source list filter state is held in an RB-tree. When the filter list
2990 * for a group is changed without changing its mode, we need to compute
2991 * the deltas between T0 and T1 for each source in the filter set,
2992 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2993 *
2994  * As we may potentially queue two record types, and the entire RB-tree
2995 * needs to be walked at once, we break this out into its own function
2996 * so we can generate a tightly packed queue of packets.
2997 *
2998 * XXX This could be written to only use one tree walk, although that makes
2999 * serializing into the mbuf chains a bit harder. For now we do two walks
3000 * which makes things easier on us, and it may or may not be harder on
3001 * the L2 cache.
3002 *
3003 * If successful the size of all data appended to the queue is returned,
3004 * otherwise an error code less than zero is returned, or zero if
3005 * no record(s) were appended.
3006 */
3007 static int
3008 igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
3009 {
3010 static const int MINRECLEN =
3011 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3012 struct ifnet *ifp;
3013 struct igmp_grouprec ig;
3014 struct igmp_grouprec *pig;
3015 struct ip_msource *ims, *nims;
3016 struct mbuf *m, *m0, *md;
3017 in_addr_t naddr;
3018 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3019 int nallow, nblock;
3020 uint8_t mode, now, then;
3021 rectype_t crt, drt, nrt;
3022
3023 IN_MULTI_LOCK_ASSERT();
3024
3025 if (inm->inm_nsrc == 0 ||
3026 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3027 return (0);
3028
3029 ifp = inm->inm_ifp; /* interface */
3030 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3031 crt = REC_NONE; /* current group record type */
3032 drt = REC_NONE; /* mask of completed group record types */
3033 nrt = REC_NONE; /* record type for current node */
3034 m0srcs = 0; /* # source which will fit in current mbuf chain */
3035 nbytes = 0; /* # of bytes appended to group's state-change queue */
3036 npbytes = 0; /* # of bytes appended this packet */
3037 rsrcs = 0; /* # sources encoded in current record */
3038 schanged = 0; /* # nodes encoded in overall filter change */
3039 nallow = 0; /* # of source entries in ALLOW_NEW */
3040 nblock = 0; /* # of source entries in BLOCK_OLD */
3041 nims = NULL; /* next tree node pointer */
3042
3043 /*
3044 * For each possible filter record mode.
3045 * The first kind of source we encounter tells us which
3046 * is the first kind of record we start appending.
3047 * If a node transitioned to UNDEFINED at t1, its mode is treated
3048 * as the inverse of the group's filter mode.
3049 */
3050 while (drt != REC_FULL) {
3051 do {
3052 m0 = ifq->ifq_tail;
3053 if (m0 != NULL &&
3054 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3055 IGMP_V3_REPORT_MAXRECS) &&
3056 (m0->m_pkthdr.len + MINRECLEN) <
3057 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3058 m = m0;
3059 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3060 sizeof(struct igmp_grouprec)) /
3061 sizeof(in_addr_t);
3062 CTR1(KTR_IGMPV3,
3063 "%s: use previous packet", __func__);
3064 } else {
3065 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3066 if (m)
3067 m->m_data += IGMP_LEADINGSPACE;
3068 if (m == NULL) {
3069 m = m_gethdr(M_DONTWAIT, MT_DATA);
3070 if (m)
3071 MH_ALIGN(m, IGMP_LEADINGSPACE);
3072 }
3073 if (m == NULL) {
3074 CTR1(KTR_IGMPV3,
3075 "%s: m_get*() failed", __func__);
3076 return (-ENOMEM);
3077 }
3078 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3079 igmp_save_context(m, ifp);
3080 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3081 sizeof(struct igmp_grouprec)) /
3082 sizeof(in_addr_t);
3083 npbytes = 0;
3084 CTR1(KTR_IGMPV3,
3085 "%s: allocated new packet", __func__);
3086 }
3087 /*
3088 * Append the IGMP group record header to the
3089 * current packet's data area.
3090 * Recalculate pointer to free space for next
3091 * group record, in case m_append() allocated
3092 * a new mbuf or cluster.
3093 */
3094 memset(&ig, 0, sizeof(ig));
3095 ig.ig_group = inm->inm_addr;
3096 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3097 if (m != m0)
3098 m_freem(m);
3099 CTR1(KTR_IGMPV3,
3100 "%s: m_append() failed", __func__);
3101 return (-ENOMEM);
3102 }
3103 npbytes += sizeof(struct igmp_grouprec);
3104 if (m != m0) {
3105 				/* new packet; offset in chain */
3106 md = m_getptr(m, npbytes -
3107 sizeof(struct igmp_grouprec), &off);
3108 pig = (struct igmp_grouprec *)(mtod(md,
3109 uint8_t *) + off);
3110 } else {
3111 /* current packet; offset from last append */
3112 md = m_last(m);
3113 pig = (struct igmp_grouprec *)(mtod(md,
3114 uint8_t *) + md->m_len -
3115 sizeof(struct igmp_grouprec));
3116 }
3117 /*
3118 * Begin walking the tree for this record type
3119 * pass, or continue from where we left off
3120 * previously if we had to allocate a new packet.
3121 * Only report deltas in-mode at t1.
3122 * We need not report included sources as allowed
3123 * if we are in inclusive mode on the group,
3124 * however the converse is not true.
3125 */
3126 rsrcs = 0;
3127 if (nims == NULL)
3128 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3129 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3130 CTR2(KTR_IGMPV3, "%s: visit node %s",
3131 __func__, inet_ntoa_haddr(ims->ims_haddr));
3132 now = ims_get_mode(inm, ims, 1);
3133 then = ims_get_mode(inm, ims, 0);
3134 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3135 __func__, then, now);
3136 if (now == then) {
3137 CTR1(KTR_IGMPV3,
3138 "%s: skip unchanged", __func__);
3139 continue;
3140 }
3141 if (mode == MCAST_EXCLUDE &&
3142 now == MCAST_INCLUDE) {
3143 CTR1(KTR_IGMPV3,
3144 "%s: skip IN src on EX group",
3145 __func__);
3146 continue;
3147 }
3148 nrt = (rectype_t)now;
3149 if (nrt == REC_NONE)
3150 nrt = (rectype_t)(~mode & REC_FULL);
3151 if (schanged++ == 0) {
3152 crt = nrt;
3153 } else if (crt != nrt)
3154 continue;
3155 naddr = htonl(ims->ims_haddr);
3156 if (!m_append(m, sizeof(in_addr_t),
3157 (void *)&naddr)) {
3158 if (m != m0)
3159 m_freem(m);
3160 CTR1(KTR_IGMPV3,
3161 "%s: m_append() failed", __func__);
3162 return (-ENOMEM);
3163 }
3164 nallow += !!(crt == REC_ALLOW);
3165 nblock += !!(crt == REC_BLOCK);
3166 if (++rsrcs == m0srcs)
3167 break;
3168 }
3169 /*
3170 * If we did not append any tree nodes on this
3171 * pass, back out of allocations.
3172 */
3173 if (rsrcs == 0) {
3174 npbytes -= sizeof(struct igmp_grouprec);
3175 if (m != m0) {
3176 CTR1(KTR_IGMPV3,
3177 "%s: m_free(m)", __func__);
3178 m_freem(m);
3179 } else {
3180 CTR1(KTR_IGMPV3,
3181 "%s: m_adj(m, -ig)", __func__);
3182 m_adj(m, -((int)sizeof(
3183 struct igmp_grouprec)));
3184 }
3185 continue;
3186 }
3187 npbytes += (rsrcs * sizeof(in_addr_t));
3188 if (crt == REC_ALLOW)
3189 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3190 else if (crt == REC_BLOCK)
3191 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3192 pig->ig_numsrc = htons(rsrcs);
3193 /*
3194 * Count the new group record, and enqueue this
3195 * packet if it wasn't already queued.
3196 */
3197 m->m_pkthdr.PH_vt.vt_nrecs++;
3198 if (m != m0)
3199 _IF_ENQUEUE(ifq, m);
3200 nbytes += npbytes;
3201 } while (nims != NULL);
3202 drt |= crt;
3203 crt = (~crt & REC_FULL);
3204 }
3205
3206 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3207 nallow, nblock);
3208
3209 return (nbytes);
3210 }
3211
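/*
 * Example of the delta walk above: suppose a group in INCLUDE mode
 * had sources {A, B} at t0 and {B, C} at t1. B is unchanged and is
 * skipped; C is in-mode at t1 only and encodes as REC_ALLOW; A fell
 * to UNDEFINED, so its record type is inverted against the group
 * mode (~MCAST_INCLUDE & REC_FULL == REC_BLOCK). The two passes thus
 * emit, in whichever order the first changed node dictates:
 *
 *	ALLOW_NEW_SOURCES { C }
 *	BLOCK_OLD_SOURCES { A }
 */
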
3212 static int
3213 igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
3214 {
3215 struct ifqueue *gq;
3216 struct mbuf *m; /* pending state-change */
3217 struct mbuf *m0; /* copy of pending state-change */
3218 struct mbuf *mt; /* last state-change in packet */
3219 int docopy, domerge;
3220 u_int recslen;
3221
3222 docopy = 0;
3223 domerge = 0;
3224 recslen = 0;
3225
3226 IN_MULTI_LOCK_ASSERT();
3227 IGMP_LOCK_ASSERT();
3228
3229 /*
3230 * If there are further pending retransmissions, make a writable
3231 * copy of each queued state-change message before merging.
3232 */
3233 if (inm->inm_scrv > 0)
3234 docopy = 1;
3235
3236 gq = &inm->inm_scq;
3237 #ifdef KTR
3238 if (gq->ifq_head == NULL) {
3239 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3240 __func__, inm);
3241 }
3242 #endif
3243
3244 m = gq->ifq_head;
3245 while (m != NULL) {
3246 /*
3247 * Only merge the report into the current packet if
3248 * there is sufficient space to do so; an IGMPv3 report
3249 * packet may only contain 65,535 group records.
3250 		 * Always use a simple mbuf chain concatenation to do this,
3251 * as large state changes for single groups may have
3252 * allocated clusters.
3253 */
3254 domerge = 0;
3255 mt = ifscq->ifq_tail;
3256 if (mt != NULL) {
3257 recslen = m_length(m, NULL);
3258
3259 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3260 m->m_pkthdr.PH_vt.vt_nrecs <=
3261 IGMP_V3_REPORT_MAXRECS) &&
3262 (mt->m_pkthdr.len + recslen <=
3263 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3264 domerge = 1;
3265 }
3266
3267 if (!domerge && _IF_QFULL(gq)) {
3268 CTR2(KTR_IGMPV3,
3269 "%s: outbound queue full, skipping whole packet %p",
3270 __func__, m);
3271 mt = m->m_nextpkt;
3272 if (!docopy)
3273 m_freem(m);
3274 m = mt;
3275 continue;
3276 }
3277
3278 if (!docopy) {
3279 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3280 _IF_DEQUEUE(gq, m0);
3281 m = m0->m_nextpkt;
3282 } else {
3283 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3284 m0 = m_dup(m, M_NOWAIT);
3285 if (m0 == NULL)
3286 return (ENOMEM);
3287 m0->m_nextpkt = NULL;
3288 m = m->m_nextpkt;
3289 }
3290
3291 if (!domerge) {
3292 			CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p",
3293 __func__, m0, ifscq);
3294 _IF_ENQUEUE(ifscq, m0);
3295 } else {
3296 struct mbuf *mtl; /* last mbuf of packet mt */
3297
3298 			CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p",
3299 __func__, m0, mt);
3300
3301 mtl = m_last(mt);
3302 m0->m_flags &= ~M_PKTHDR;
3303 mt->m_pkthdr.len += recslen;
3304 mt->m_pkthdr.PH_vt.vt_nrecs +=
3305 m0->m_pkthdr.PH_vt.vt_nrecs;
3306
3307 mtl->m_next = m0;
3308 }
3309 }
3310
3311 return (0);
3312 }
3313
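/*
 * Example of the copy-vs-move decision above: with two
 * retransmissions still owed (inm_scrv > 0 after the caller's
 * decrement), each pending packet is m_dup()'d into ifscq so the
 * original stays on the per-group queue for the next pass; on the
 * final retransmission the packets are dequeued outright and the
 * per-group queue drains to empty.
 */
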
3314 /*
3315 * Respond to a pending IGMPv3 General Query.
3316 */
3317 static void
3318 igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
3319 {
3320 struct ifmultiaddr *ifma;
3321 struct ifnet *ifp;
3322 struct in_multi *inm;
3323 int retval, loop;
3324
3325 IN_MULTI_LOCK_ASSERT();
3326 IGMP_LOCK_ASSERT();
3327
3328 KASSERT(igi->igi_version == IGMP_VERSION_3,
3329 ("%s: called when version %d", __func__, igi->igi_version));
3330
3331 ifp = igi->igi_ifp;
3332
3333 IF_ADDR_RLOCK(ifp);
3334 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3335 if (ifma->ifma_addr->sa_family != AF_INET ||
3336 ifma->ifma_protospec == NULL)
3337 continue;
3338
3339 inm = (struct in_multi *)ifma->ifma_protospec;
3340 KASSERT(ifp == inm->inm_ifp,
3341 ("%s: inconsistent ifp", __func__));
3342
3343 switch (inm->inm_state) {
3344 case IGMP_NOT_MEMBER:
3345 case IGMP_SILENT_MEMBER:
3346 break;
3347 case IGMP_REPORTING_MEMBER:
3348 case IGMP_IDLE_MEMBER:
3349 case IGMP_LAZY_MEMBER:
3350 case IGMP_SLEEPING_MEMBER:
3351 case IGMP_AWAKENING_MEMBER:
3352 inm->inm_state = IGMP_REPORTING_MEMBER;
3353 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3354 inm, 0, 0, 0);
3355 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3356 __func__, retval);
3357 break;
3358 case IGMP_G_QUERY_PENDING_MEMBER:
3359 case IGMP_SG_QUERY_PENDING_MEMBER:
3360 case IGMP_LEAVING_MEMBER:
3361 break;
3362 }
3363 }
3364 IF_ADDR_RUNLOCK(ifp);
3365
3366 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3367 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3368
3369 /*
3370 * Slew transmission of bursts over 500ms intervals.
3371 */
3372 if (igi->igi_gq.ifq_head != NULL) {
3373 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3374 IGMP_RESPONSE_BURST_INTERVAL);
3375 V_interface_timers_running = 1;
3376 }
3377 }
3378
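/*
 * The re-arm above paces large responses: each pass drains at most a
 * bounded burst (IGMP_MAX_RESPONSE_BURST packets) and, if the queue
 * is still non-empty, schedules the next burst a random slew into a
 * later ~500ms window. A host with, say, 100 queued report packets
 * therefore answers a General Query as a series of small bursts
 * rather than one synchronous blast.
 */
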
3379 /*
3380 * Transmit the next pending IGMP message in the output queue.
3381 *
3382 * We get called from netisr_processqueue(). A mutex private to igmpoq
3383 * will be acquired and released around this routine.
3384 *
3385 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3386 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3387 * a link and uses a link-scope multicast address.
3388 */
3389 static void
3390 igmp_intr(struct mbuf *m)
3391 {
3392 struct ip_moptions imo;
3393 struct ifnet *ifp;
3394 struct mbuf *ipopts, *m0;
3395 int error;
3396 uint32_t ifindex;
3397
3398 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3399
3400 /*
3401 * Set VNET image pointer from enqueued mbuf chain
3402 * before doing anything else. Whilst we use interface
3403 * indexes to guard against interface detach, they are
3404 * unique to each VIMAGE and must be retrieved.
3405 */
3406 CURVNET_SET((struct vnet *)(m->m_pkthdr.header));
3407 ifindex = igmp_restore_context(m);
3408
3409 /*
3410 * Check if the ifnet still exists. This limits the scope of
3411 * any race in the absence of a global ifp lock for low cost
3412 * (an array lookup).
3413 */
3414 ifp = ifnet_byindex(ifindex);
3415 if (ifp == NULL) {
3416 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3417 __func__, m, ifindex);
3418 m_freem(m);
3419 IPSTAT_INC(ips_noroute);
3420 goto out;
3421 }
3422
3423 ipopts = V_igmp_sendra ? m_raopt : NULL;
3424
3425 imo.imo_multicast_ttl = 1;
3426 imo.imo_multicast_vif = -1;
3427 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3428
3429 /*
3430 * If the user requested that IGMP traffic be explicitly
3431 * redirected to the loopback interface (e.g. they are running a
3432 * MANET interface and the routing protocol needs to see the
3433 * updates), handle this now.
3434 */
3435 if (m->m_flags & M_IGMP_LOOP)
3436 imo.imo_multicast_ifp = V_loif;
3437 else
3438 imo.imo_multicast_ifp = ifp;
3439
3440 if (m->m_flags & M_IGMPV2) {
3441 m0 = m;
3442 } else {
3443 m0 = igmp_v3_encap_report(ifp, m);
3444 if (m0 == NULL) {
3445 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3446 m_freem(m);
3447 IPSTAT_INC(ips_odropped);
3448 goto out;
3449 }
3450 }
3451
3452 igmp_scrub_context(m0);
3453 m->m_flags &= ~(M_PROTOFLAGS);
3454 m0->m_pkthdr.rcvif = V_loif;
3455 #ifdef MAC
3456 mac_netinet_igmp_send(ifp, m0);
3457 #endif
3458 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3459 if (error) {
3460 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3461 goto out;
3462 }
3463
3464 IGMPSTAT_INC(igps_snd_reports);
3465
3466 out:
3467 /*
3468 * We must restore the existing vnet pointer before
3469 * continuing as we are run from netisr context.
3470 */
3471 CURVNET_RESTORE();
3472 }
3473
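/*
 * A subtlety above: the enqueued mbuf carries both the vnet pointer
 * (stashed in m_pkthdr.header) and the interface index, because the
 * netisr may run after the enqueueing context is gone. The
 * ifnet_byindex() lookup is what keeps a detached interface from
 * being dereferenced, at the cost of silently dropping the report
 * (counted in ips_noroute).
 */
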
3474 /*
3475 * Encapsulate an IGMPv3 report.
3476 *
3477 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3478 * chain has already had its IP/IGMPv3 header prepended. In this case
3479 * the function will not attempt to prepend; the lengths and checksums
3480 * will however be re-computed.
3481 *
3482 * Returns a pointer to the new mbuf chain head, or NULL if the
3483 * allocation failed.
3484 */
3485 static struct mbuf *
3486 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3487 {
3488 struct igmp_report *igmp;
3489 struct ip *ip;
3490 int hdrlen, igmpreclen;
3491
3492 KASSERT((m->m_flags & M_PKTHDR),
3493 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3494
3495 igmpreclen = m_length(m, NULL);
3496 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3497
3498 if (m->m_flags & M_IGMPV3_HDR) {
3499 igmpreclen -= hdrlen;
3500 } else {
3501 M_PREPEND(m, hdrlen, M_DONTWAIT);
3502 if (m == NULL)
3503 return (NULL);
3504 m->m_flags |= M_IGMPV3_HDR;
3505 }
3506
3507 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3508
3509 m->m_data += sizeof(struct ip);
3510 m->m_len -= sizeof(struct ip);
3511
3512 igmp = mtod(m, struct igmp_report *);
3513 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3514 igmp->ir_rsv1 = 0;
3515 igmp->ir_rsv2 = 0;
3516 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3517 igmp->ir_cksum = 0;
3518 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3519 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3520
3521 m->m_data -= sizeof(struct ip);
3522 m->m_len += sizeof(struct ip);
3523
3524 ip = mtod(m, struct ip *);
3525 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3526 ip->ip_len = hdrlen + igmpreclen;
3527 ip->ip_off = IP_DF;
3528 ip->ip_p = IPPROTO_IGMP;
3529 ip->ip_sum = 0;
3530
3531 ip->ip_src.s_addr = INADDR_ANY;
3532
3533 if (m->m_flags & M_IGMP_LOOP) {
3534 struct in_ifaddr *ia;
3535
3536 IFP_TO_IA(ifp, ia);
3537 if (ia != NULL) {
3538 ip->ip_src = ia->ia_addr.sin_addr;
3539 ifa_free(&ia->ia_ifa);
3540 }
3541 }
3542
3543 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3544
3545 return (m);
3546 }
3547
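/*
 * Layout produced above for a report carrying igmpreclen bytes of
 * group records:
 *
 *	[ 20B struct ip | 8B struct igmp_report | group records ]
 *
 * ir_cksum is computed over the report header plus records only (the
 * data pointer is advanced past the IP header first); ip_sum is left
 * zero for ip_output() to fill in, and the destination is fixed at
 * 224.0.0.22 (INADDR_ALLRPTS_GROUP), the IGMPv3-capable routers group.
 */
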
3548 #ifdef KTR
3549 static char *
3550 igmp_rec_type_to_str(const int type)
3551 {
3552
3553 switch (type) {
3554 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3555 return "TO_EX";
3556 break;
3557 case IGMP_CHANGE_TO_INCLUDE_MODE:
3558 return "TO_IN";
3559 break;
3560 case IGMP_MODE_IS_EXCLUDE:
3561 return "MODE_EX";
3562 break;
3563 case IGMP_MODE_IS_INCLUDE:
3564 return "MODE_IN";
3565 break;
3566 case IGMP_ALLOW_NEW_SOURCES:
3567 return "ALLOW_NEW";
3568 break;
3569 case IGMP_BLOCK_OLD_SOURCES:
3570 return "BLOCK_OLD";
3571 break;
3572 default:
3573 break;
3574 }
3575 return "unknown";
3576 }
3577 #endif
3578
3579 static void
3580 igmp_init(void *unused __unused)
3581 {
3582
3583 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3584
3585 IGMP_LOCK_INIT();
3586
3587 m_raopt = igmp_ra_alloc();
3588
3589 netisr_register(&igmp_nh);
3590 }
3591 SYSINIT(igmp_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_init, NULL);
3592
3593 static void
3594 igmp_uninit(void *unused __unused)
3595 {
3596
3597 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3598
3599 netisr_unregister(&igmp_nh);
3600
3601 m_free(m_raopt);
3602 m_raopt = NULL;
3603
3604 IGMP_LOCK_DESTROY();
3605 }
3606 SYSUNINIT(igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_uninit, NULL);
3607
3608 static void
3609 vnet_igmp_init(const void *unused __unused)
3610 {
3611
3612 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3613
3614 LIST_INIT(&V_igi_head);
3615 }
3616 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_igmp_init,
3617 NULL);
3618
3619 static void
3620 vnet_igmp_uninit(const void *unused __unused)
3621 {
3622
3623 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3624
3625 KASSERT(LIST_EMPTY(&V_igi_head),
3626 ("%s: igi list not empty; ifnets not detached?", __func__));
3627 }
3628 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
3629 vnet_igmp_uninit, NULL);
3630
3631 static int
3632 igmp_modevent(module_t mod, int type, void *unused __unused)
3633 {
3634
3635 switch (type) {
3636 case MOD_LOAD:
3637 case MOD_UNLOAD:
3638 break;
3639 default:
3640 return (EOPNOTSUPP);
3641 }
3642 return (0);
3643 }
3644
3645 static moduledata_t igmp_mod = {
3646 "igmp",
3647 igmp_modevent,
3648 0
3649 };
3650 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);