FreeBSD/Linux Kernel Cross Reference
sys/netinet6/mld6.c


/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
 */

/*-
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)igmp.c      8.1 (Berkeley) 7/19/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/netinet6/mld6.c 230077 2012-01-13 19:51:15Z jhb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

static struct mld_ifinfo *
                mli_alloc_locked(struct ifnet *);
static void     mli_delete_locked(const struct ifnet *);
static void     mld_dispatch_packet(struct mbuf *);
static void     mld_dispatch_queue(struct ifqueue *, int);
static void     mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
static void     mld_fasttimo_vnet(void);
static int      mld_handle_state_change(struct in6_multi *,
                    struct mld_ifinfo *);
static int      mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
                    const int);
#ifdef KTR
static char *   mld_rec_type_to_str(const int);
#endif
static void     mld_set_version(struct mld_ifinfo *, const int);
static void     mld_slowtimo_vnet(void);
static int      mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
                    /*const*/ struct mld_hdr *);
static int      mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
                    /*const*/ struct mld_hdr *);
static void     mld_v1_process_group_timer(struct mld_ifinfo *,
                    struct in6_multi *);
static void     mld_v1_process_querier_timers(struct mld_ifinfo *);
static int      mld_v1_transmit_report(struct in6_multi *, const int);
static void     mld_v1_update_group(struct in6_multi *, const int);
static void     mld_v2_cancel_link_timers(struct mld_ifinfo *);
static void     mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
                mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int      mld_v2_enqueue_filter_change(struct ifqueue *,
                    struct in6_multi *);
static int      mld_v2_enqueue_group_record(struct ifqueue *,
                    struct in6_multi *, const int, const int, const int,
                    const int);
static int      mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
                    struct mbuf *, const int, const int);
static int      mld_v2_merge_state_changes(struct in6_multi *,
                    struct ifqueue *);
static void     mld_v2_process_group_timers(struct mld_ifinfo *,
                    struct ifqueue *, struct ifqueue *,
                    struct in6_multi *, const int);
static int      mld_v2_process_group_query(struct in6_multi *,
                    struct mld_ifinfo *mli, int, struct mbuf *, const int);
static int      sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
static int      sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);

/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 *
 * Locking:
 *  * The MLD subsystem lock ends up being system-wide for the moment,
 *    but could be per-VIMAGE later on.
 *  * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * IN6_MULTI_LOCK covers in_multi.
 *  * MLD_LOCK covers per-link state and any global variables in this file.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *
 *  XXX LOR PREVENTION
 *  A special case for IPv6 is the in6_setscope() routine. ip6_output()
 *  will not accept an ifp; it wants an embedded scope ID, unlike
 *  ip_output(), which happily takes the ifp given to it. The embedded
 *  scope ID is only used by MLD to select the outgoing interface.
 *
 *  During interface attach and detach, MLD will take MLD_LOCK *after*
 *  the IF_AFDATA_LOCK.
 *  As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
 *  it with MLD_LOCK held without triggering an LOR. A netisr with indirect
 *  dispatch could work around this, but we'd rather not do that, as it
 *  can introduce other races.
 *
 *  As such, we exploit the fact that the scope ID is just the interface
 *  index, and embed it in the IPv6 destination address accordingly.
 *  This is potentially NOT VALID for MLDv1 reports, as they
 *  are always sent to the multicast group itself; as MLDv2
 *  reports are always sent to ff02::16, this is not an issue
 *  when MLDv2 is in use.
 *
 *  This does not however eliminate the LOR when ip6_output() itself
 *  calls in6_setscope() internally whilst MLD_LOCK is held. This will
 *  trigger a LOR warning in WITNESS when the ifnet is detached.
 *
 *  The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
 *  how it's used across the network stack. Here we're simply exploiting
 *  the fact that MLD runs at a similar layer in the stack to scope6.c.
 *
 * VIMAGE:
 *  * Each in6_multi corresponds to an ifp, and each ifp corresponds
 *    to a vnet in ifp->if_vnet.
 */
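
/*
 * Example (illustrative sketch only, not part of the original file, and
 * not compiled): the permitted acquisition order described above when
 * all three locks must be held at once.
 */
#if 0
static void
mld_lock_order_example(struct ifnet *ifp)
{

        IN6_MULTI_LOCK();
        MLD_LOCK();
        IF_ADDR_LOCK(ifp);
        /* ... walk ifp->if_multiaddrs and touch per-link MLD state ... */
        IF_ADDR_UNLOCK(ifp);
        MLD_UNLOCK();
        IN6_MULTI_UNLOCK();
}
#endif
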
static struct mtx                mld_mtx;
MALLOC_DEFINE(M_MLD, "mld", "mld state");

#define MLD_EMBEDSCOPE(pin6, zoneid)                                    \
        if (IN6_IS_SCOPE_LINKLOCAL(pin6) ||                             \
            IN6_IS_ADDR_MC_INTFACELOCAL(pin6))                          \
                (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)

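/*
 * Example (illustrative, not compiled): embedding the interface index
 * as the scope ID in a link-scope destination before it is handed to
 * ip6_output(), per the LOR PREVENTION notes above.
 */
#if 0
static void
mld_embedscope_example(struct ifnet *ifp)
{
        struct in6_addr dst;

        dst = in6addr_linklocal_allnodes;       /* ff02::1 */
        MLD_EMBEDSCOPE(&dst, ifp->if_index);
        /* dst.s6_addr16[1] now holds htons(ifp->if_index). */
}
#endif
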
/*
 * VIMAGE-wide globals.
 */
static VNET_DEFINE(struct timeval, mld_gsrdelay) = {10, 0};
static VNET_DEFINE(LIST_HEAD(, mld_ifinfo), mli_head);
static VNET_DEFINE(int, interface_timers_running6);
static VNET_DEFINE(int, state_change_timers_running6);
static VNET_DEFINE(int, current_state_timers_running6);

#define V_mld_gsrdelay                  VNET(mld_gsrdelay)
#define V_mli_head                      VNET(mli_head)
#define V_interface_timers_running6     VNET(interface_timers_running6)
#define V_state_change_timers_running6  VNET(state_change_timers_running6)
#define V_current_state_timers_running6 VNET(current_state_timers_running6)

SYSCTL_DECL(_net_inet6);        /* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
    "IPv6 Multicast Listener Discovery");

/*
 * Virtualized sysctls.
 */
SYSCTL_VNET_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");
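
/*
 * Example usage from userland (illustrative): the handler,
 * sysctl_mld_gsr() below, accepts values in the range [-1, 59] and
 * rejects anything else with EINVAL:
 *
 *      # sysctl net.inet6.mld.gsrdelay=10
 */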

/*
 * Non-virtualized sysctls.
 */
SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");

static int      mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW,
    &mld_v1enable, 0, "Enable fallback to MLDv1");
TUNABLE_INT("net.inet6.mld.v1enable", &mld_v1enable);

static int      mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
TUNABLE_INT("net.inet6.mld.use_allow", &mld_use_allow);

/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
        struct ip6_hbh          hbh;
        struct ip6_opt          pad;
        struct ip6_opt_router   ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
        .hbh = { 0, 0 },
        .pad = { .ip6o_type = IP6OPT_PADN, 0 },
        .ra = {
            .ip6or_type = IP6OPT_ROUTER_ALERT,
            .ip6or_len = IP6OPT_RTALERT_LEN - 2,
            .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
            .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
        }
};
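
/*
 * Resulting 8-byte hop-by-hop option wire layout (illustrative sketch;
 * see RFC 2711). The PadN option precedes the Router Alert option,
 * whose 16-bit value is IP6OPT_RTALERT_MLD in network byte order:
 *
 *      | next hdr | len=0 | PadN | 0 | RTALERT | len=2 | value (2 bytes) |
 */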
static struct ip6_pktopts mld_po;

static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
        m->m_pkthdr.header = ifp->if_vnet;
#endif /* VIMAGE */
        m->m_pkthdr.flowid = ifp->if_index;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{

        m->m_pkthdr.header = NULL;
        m->m_pkthdr.flowid = 0;
}

/*
 * Restore context from a queued output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
mld_restore_context(struct mbuf *m)
{

#if defined(VIMAGE) && defined(INVARIANTS)
        KASSERT(curvnet == m->m_pkthdr.header,
            ("%s: called when curvnet was not restored", __func__));
#endif
        return (m->m_pkthdr.flowid);
}
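
/*
 * Example (illustrative, not compiled): the intended pairing of the
 * context helpers around a deferred transmission. The enqueue side runs
 * with MLD_LOCK held; mld_dispatch_packet() performs the restore side.
 */
#if 0
static void
mld_context_example(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp)
{
        uint32_t ifindex;

        mld_save_context(m, ifp);               /* before enqueueing */
        _IF_ENQUEUE(ifq, m);

        /* ... later, at dispatch time ... */
        _IF_DEQUEUE(ifq, m);
        CURVNET_SET(ifp->if_vnet);              /* must match saved vnet */
        ifindex = mld_restore_context(m);
        /* look up ifnet_byindex(ifindex) and transmit */
        CURVNET_RESTORE();
}
#endif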

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by MLD lock.
 */
static int
sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
{
        int error;
        int i;

        error = sysctl_wire_old_buffer(req, sizeof(int));
        if (error)
                return (error);

        MLD_LOCK();

        i = V_mld_gsrdelay.tv_sec;

        error = sysctl_handle_int(oidp, &i, 0, req);
        if (error || !req->newptr)
                goto out_locked;

        if (i < -1 || i >= 60) {
                error = EINVAL;
                goto out_locked;
        }

        CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
             V_mld_gsrdelay.tv_sec, i);
        V_mld_gsrdelay.tv_sec = i;

out_locked:
        MLD_UNLOCK();
        return (error);
}

/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
{
        int                     *name;
        int                      error;
        u_int                    namelen;
        struct ifnet            *ifp;
        struct mld_ifinfo       *mli;

        name = (int *)arg1;
        namelen = arg2;

        if (req->newptr != NULL)
                return (EPERM);

        if (namelen != 1)
                return (EINVAL);

        error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
        if (error)
                return (error);

        IN6_MULTI_LOCK();
        MLD_LOCK();

        if (name[0] <= 0 || name[0] > V_if_index) {
                error = ENOENT;
                goto out_locked;
        }

        error = ENOENT;

        ifp = ifnet_byindex(name[0]);
        if (ifp == NULL)
                goto out_locked;

        LIST_FOREACH(mli, &V_mli_head, mli_link) {
                if (ifp == mli->mli_ifp) {
                        error = SYSCTL_OUT(req, mli,
                            sizeof(struct mld_ifinfo));
                        break;
                }
        }

out_locked:
        MLD_UNLOCK();
        IN6_MULTI_UNLOCK();
        return (error);
}

/*
 * Dispatch an entire queue of pending packet chains.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
mld_dispatch_queue(struct ifqueue *ifq, int limit)
{
        struct mbuf *m;

        for (;;) {
                _IF_DEQUEUE(ifq, m);
                if (m == NULL)
                        break;
                CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, ifq);
                mld_dispatch_packet(m);
                if (--limit == 0)
                        break;
        }
}

/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses. However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline int
mld_is_addr_reported(const struct in6_addr *addr)
{

        KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));

        if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
                return (0);

        if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
                struct in6_addr tmp = *addr;
                in6_clearscope(&tmp);
                if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
                        return (0);
        }

        return (1);
}
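
/*
 * Examples (illustrative):
 *      ff01::1 (node-local all-nodes)          -> 0, suppressed
 *      ff02::1 (link-local all-nodes)          -> 0, suppressed
 *      ff02::1:ff00:1 (solicited-node)         -> 1, reported
 *      ff05::2 (site-local all-routers)        -> 1, reported
 */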

/*
 * Attach MLD when PF_INET6 is attached to an interface.
 *
 * SMPng: Normally called with IF_AFDATA_LOCK held.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp)
{
        struct mld_ifinfo *mli;

        CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
            __func__, ifp, ifp->if_xname);

        MLD_LOCK();

        mli = mli_alloc_locked(ifp);
        if (!(ifp->if_flags & IFF_MULTICAST))
                mli->mli_flags |= MLIF_SILENT;
        if (mld_use_allow)
                mli->mli_flags |= MLIF_USEALLOW;

        MLD_UNLOCK();

        return (mli);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct mld_ifinfo *
mli_alloc_locked(/*const*/ struct ifnet *ifp)
{
        struct mld_ifinfo *mli;

        MLD_LOCK_ASSERT();

        mli = malloc(sizeof(struct mld_ifinfo), M_MLD, M_NOWAIT|M_ZERO);
        if (mli == NULL)
                goto out;

        mli->mli_ifp = ifp;
        mli->mli_version = MLD_VERSION_2;
        mli->mli_flags = 0;
        mli->mli_rv = MLD_RV_INIT;
        mli->mli_qi = MLD_QI_INIT;
        mli->mli_qri = MLD_QRI_INIT;
        mli->mli_uri = MLD_URI_INIT;

        SLIST_INIT(&mli->mli_relinmhead);

        /*
         * Responses to general queries are subject to bounds.
         */
        IFQ_SET_MAXLEN(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);

        LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);

        CTR2(KTR_MLD, "allocate mld_ifinfo for ifp %p(%s)",
             ifp, ifp->if_xname);

out:
        return (mli);
}

/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 * Run before link-layer cleanup; cleanup groups, but do not free MLD state.
 *
 * SMPng: Caller must hold IN6_MULTI_LOCK().
 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
 * XXX This routine is also bitten by unlocked ifma_protospec access.
 */
void
mld_ifdetach(struct ifnet *ifp)
{
        struct mld_ifinfo       *mli;
        struct ifmultiaddr      *ifma;
        struct in6_multi        *inm, *tinm;

        CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
            ifp->if_xname);

        IN6_MULTI_LOCK_ASSERT();
        MLD_LOCK();

        mli = MLD_IFINFO(ifp);
        if (mli->mli_version == MLD_VERSION_2) {
                IF_ADDR_LOCK(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_INET6 ||
                            ifma->ifma_protospec == NULL)
                                continue;
                        inm = (struct in6_multi *)ifma->ifma_protospec;
                        if (inm->in6m_state == MLD_LEAVING_MEMBER) {
                                SLIST_INSERT_HEAD(&mli->mli_relinmhead,
                                    inm, in6m_nrele);
                        }
                        in6m_clear_recorded(inm);
                }
                IF_ADDR_UNLOCK(ifp);
                SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
                    tinm) {
                        SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
                        in6m_release_locked(inm);
                }
        }

        MLD_UNLOCK();
}

/*
 * Hook for domifdetach.
 * Runs after link-layer cleanup; free MLD state.
 *
 * SMPng: Normally called with IF_AFDATA_LOCK held.
 */
void
mld_domifdetach(struct ifnet *ifp)
{

        CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
            __func__, ifp, ifp->if_xname);

        MLD_LOCK();
        mli_delete_locked(ifp);
        MLD_UNLOCK();
}

static void
mli_delete_locked(const struct ifnet *ifp)
{
        struct mld_ifinfo *mli, *tmli;

        CTR3(KTR_MLD, "%s: freeing mld_ifinfo for ifp %p(%s)",
            __func__, ifp, ifp->if_xname);

        MLD_LOCK_ASSERT();

        LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
                if (mli->mli_ifp == ifp) {
                        /*
                         * Free deferred General Query responses.
                         */
                        _IF_DRAIN(&mli->mli_gq);

                        LIST_REMOVE(mli, mli_link);

                        KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
                            ("%s: there are dangling in_multi references",
                            __func__));

                        free(mli, M_MLD);
                        return;
                }
        }
#ifdef INVARIANTS
        panic("%s: mld_ifinfo not found for ifp %p\n", __func__, ifp);
#endif
}

/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
        struct ifmultiaddr      *ifma;
        struct mld_ifinfo       *mli;
        struct in6_multi        *inm;
        int                      is_general_query;
        uint16_t                 timer;
#ifdef KTR
        char                     ip6tbuf[INET6_ADDRSTRLEN];
#endif

        is_general_query = 0;

        if (!mld_v1enable) {
                CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &mld->mld_addr),
                    ifp, ifp->if_xname);
                return (0);
        }

        /*
         * RFC3810 Section 6.2: MLD queries must originate from
         * a router's link-local address.
         */
        if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
                CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
                    ifp, ifp->if_xname);
                return (0);
        }

        /*
         * Do address field validation upfront before we accept
         * the query.
         */
        if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
                /*
                 * MLDv1 General Query.
                 * If this was not sent to the all-nodes group, ignore it.
                 */
                struct in6_addr          dst;

                dst = ip6->ip6_dst;
                in6_clearscope(&dst);
                if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
                        return (EINVAL);
                is_general_query = 1;
        } else {
                /*
                 * Embed scope ID of receiving interface in MLD query for
                 * lookup whilst we don't hold other locks.
                 */
                in6_setscope(&mld->mld_addr, ifp, NULL);
        }

        IN6_MULTI_LOCK();
        MLD_LOCK();
        IF_ADDR_LOCK(ifp);

        /*
         * Switch to MLDv1 host compatibility mode.
         */
        mli = MLD_IFINFO(ifp);
        KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
        mld_set_version(mli, MLD_VERSION_1);

        timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
        if (timer == 0)
                timer = 1;

        if (is_general_query) {
                /*
                 * For each reporting group joined on this
                 * interface, kick the report timer.
                 */
                CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
                    ifp, ifp->if_xname);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_INET6 ||
                            ifma->ifma_protospec == NULL)
                                continue;
                        inm = (struct in6_multi *)ifma->ifma_protospec;
                        mld_v1_update_group(inm, timer);
                }
        } else {
                /*
                 * MLDv1 Group-Specific Query.
                 * If this is a group-specific MLDv1 query, we need only
                 * look up the single group to process it.
                 */
                inm = in6m_lookup_locked(ifp, &mld->mld_addr);
                if (inm != NULL) {
                        CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
                            ip6_sprintf(ip6tbuf, &mld->mld_addr),
                            ifp, ifp->if_xname);
                        mld_v1_update_group(inm, timer);
                }
                /* XXX Clear embedded scope ID as userland won't expect it. */
                in6_clearscope(&mld->mld_addr);
        }

        IF_ADDR_UNLOCK(ifp);
        MLD_UNLOCK();
        IN6_MULTI_UNLOCK();

        return (0);
}

/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
 */
static void
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
#ifdef KTR
        char                     ip6tbuf[INET6_ADDRSTRLEN];
#endif

        CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
            ip6_sprintf(ip6tbuf, &inm->in6m_addr),
            inm->in6m_ifp->if_xname, timer);

        IN6_MULTI_LOCK_ASSERT();

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
                break;
        case MLD_REPORTING_MEMBER:
                if (inm->in6m_timer != 0 &&
                    inm->in6m_timer <= timer) {
                        CTR1(KTR_MLD, "%s: REPORTING and timer running, "
                            "skipping.", __func__);
                        break;
                }
                /* FALLTHROUGH */
        case MLD_SG_QUERY_PENDING_MEMBER:
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_IDLE_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_AWAKENING_MEMBER:
                CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
                inm->in6m_state = MLD_REPORTING_MEMBER;
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                V_current_state_timers_running6 = 1;
                break;
        case MLD_SLEEPING_MEMBER:
                CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
                inm->in6m_state = MLD_AWAKENING_MEMBER;
                break;
        case MLD_LEAVING_MEMBER:
                break;
        }
}

/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
        struct mld_ifinfo       *mli;
        struct mldv2_query      *mld;
        struct in6_multi        *inm;
        uint32_t                 maxdelay, nsrc, qqi;
        int                      is_general_query;
        uint16_t                 timer;
        uint8_t                  qrv;
#ifdef KTR
        char                     ip6tbuf[INET6_ADDRSTRLEN];
#endif

        is_general_query = 0;

        /*
         * RFC3810 Section 6.2: MLD queries must originate from
         * a router's link-local address.
         */
        if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
                CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
                    ifp, ifp->if_xname);
                return (0);
        }

        CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, ifp->if_xname);

        mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

        maxdelay = ntohs(mld->mld_maxdelay);    /* in milliseconds */
        if (maxdelay >= 32768) {
                maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
                           (MLD_MRC_EXP(maxdelay) + 3);
        }
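        /*
         * Worked example (illustrative): a Maximum Response Code of
         * 0x9000 has exponent 1 and mantissa 0 under the RFC 3810
         * encoding, and decodes to (0x0 | 0x1000) << (1 + 3) = 65536 ms.
         */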
        timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
        if (timer == 0)
                timer = 1;

        qrv = MLD_QRV(mld->mld_misc);
        if (qrv < 2) {
                CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
                    qrv, MLD_RV_INIT);
                qrv = MLD_RV_INIT;
        }

        qqi = mld->mld_qqi;
        if (qqi >= 128) {
                qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
                     (MLD_QQIC_EXP(mld->mld_qqi) + 3);
        }

        nsrc = ntohs(mld->mld_numsrc);
        if (nsrc > MLD_MAX_GS_SOURCES)
                return (EMSGSIZE);
        if (icmp6len < sizeof(struct mldv2_query) +
            (nsrc * sizeof(struct in6_addr)))
                return (EMSGSIZE);

        /*
         * Do further input validation upfront to avoid resetting timers
         * should we need to discard this query.
         */
        if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
                /*
                 * General Queries SHOULD be directed to ff02::1.
                 * A general query with a source list has undefined
                 * behaviour; discard it.
                 */
                struct in6_addr          dst;

                dst = ip6->ip6_dst;
                in6_clearscope(&dst);
                if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes) ||
                    nsrc > 0)
                        return (EINVAL);
                is_general_query = 1;
        } else {
                /*
                 * Embed scope ID of receiving interface in MLD query for
                 * lookup whilst we don't hold other locks (due to KAME
                 * locking lameness). We own this mbuf chain just now.
                 */
                in6_setscope(&mld->mld_addr, ifp, NULL);
        }

        IN6_MULTI_LOCK();
        MLD_LOCK();
        IF_ADDR_LOCK(ifp);

        mli = MLD_IFINFO(ifp);
        KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));

        /*
         * Discard the v2 query if we're in Compatibility Mode.
         * The RFC is pretty clear that hosts need to stay in MLDv1 mode
         * until the Old Version Querier Present timer expires.
         */
        if (mli->mli_version != MLD_VERSION_2)
                goto out_locked;

        mld_set_version(mli, MLD_VERSION_2);
        mli->mli_rv = qrv;
        mli->mli_qi = qqi;
        mli->mli_qri = maxdelay;

        CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
            maxdelay);

        if (is_general_query) {
                /*
                 * MLDv2 General Query.
                 *
                 * Schedule a current-state report on this ifp for
                 * all groups, possibly containing source lists.
                 *
                 * If there is a pending General Query response
                 * scheduled earlier than the selected delay, do
                 * not schedule any other reports.
                 * Otherwise, reset the interface timer.
                 */
                CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
                    ifp, ifp->if_xname);
                if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
                        mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
                        V_interface_timers_running6 = 1;
                }
        } else {
                /*
                 * MLDv2 Group-specific or Group-and-source-specific Query.
                 *
                 * Group-source-specific queries are throttled on
                 * a per-group basis to defeat denial-of-service attempts.
                 * Queries for groups we are not a member of on this
                 * link are simply ignored.
                 */
                inm = in6m_lookup_locked(ifp, &mld->mld_addr);
                if (inm == NULL)
                        goto out_locked;
                if (nsrc > 0) {
                        if (!ratecheck(&inm->in6m_lastgsrtv,
                            &V_mld_gsrdelay)) {
                                CTR1(KTR_MLD, "%s: GS query throttled.",
                                    __func__);
                                goto out_locked;
                        }
                }
                CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
                     ifp, ifp->if_xname);
                /*
                 * If there is a pending General Query response
                 * scheduled sooner than the selected delay, no
                 * further report need be scheduled.
                 * Otherwise, prepare to respond to the
                 * group-specific or group-and-source query.
                 */
                if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
                        mld_v2_process_group_query(inm, mli, timer, m, off);

                /* XXX Clear embedded scope ID as userland won't expect it. */
                in6_clearscope(&mld->mld_addr);
        }

out_locked:
        IF_ADDR_UNLOCK(ifp);
        MLD_UNLOCK();
        IN6_MULTI_UNLOCK();

        return (0);
}

/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifinfo *mli,
    int timer, struct mbuf *m0, const int off)
{
        struct mldv2_query      *mld;
        int                      retval;
        uint16_t                 nsrc;

        IN6_MULTI_LOCK_ASSERT();
        MLD_LOCK_ASSERT();

        retval = 0;
        mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
        case MLD_SLEEPING_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_AWAKENING_MEMBER:
        case MLD_IDLE_MEMBER:
        case MLD_LEAVING_MEMBER:
                return (retval);
        case MLD_REPORTING_MEMBER:
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_SG_QUERY_PENDING_MEMBER:
                break;
        }

        nsrc = ntohs(mld->mld_numsrc);

        /*
         * Deal with group-specific queries upfront.
         * If any group query is already pending, purge any recorded
         * source-list state if it exists, and schedule a query response
         * for this group-specific query.
         */
        if (nsrc == 0) {
                if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
                    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
                        in6m_clear_recorded(inm);
                        timer = min(inm->in6m_timer, timer);
                }
                inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                V_current_state_timers_running6 = 1;
                return (retval);
        }

        /*
         * Deal with the case where a group-and-source-specific query has
         * been received but a group-specific query is already pending.
         */
        if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
                timer = min(inm->in6m_timer, timer);
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                V_current_state_timers_running6 = 1;
                return (retval);
        }

        /*
         * Finally, deal with the case where a group-and-source-specific
         * query has been received, where a response to a previous g-s-r
         * query exists, or none exists.
         * In this case, we need to parse the source-list which the Querier
         * has provided us with and check if we have any source list filter
         * entries at T1 for these sources. If we do not, there is no need
         * to schedule a report and the query may be dropped.
         * If we do, we must record them and schedule a current-state
         * report for those sources.
         */
        if (inm->in6m_nsrc > 0) {
                struct mbuf             *m;
                uint8_t                 *sp;
                int                      i, nrecorded;
                int                      soff;

                m = m0;
                soff = off + sizeof(struct mldv2_query);
                nrecorded = 0;
                for (i = 0; i < nsrc; i++) {
                        sp = mtod(m, uint8_t *) + soff;
                        retval = in6m_record_source(inm,
                            (const struct in6_addr *)sp);
                        if (retval < 0)
                                break;
                        nrecorded += retval;
                        soff += sizeof(struct in6_addr);
                        if (soff >= m->m_len) {
                                soff = soff - m->m_len;
                                m = m->m_next;
                                if (m == NULL)
                                        break;
                        }
                }
                if (nrecorded > 0) {
                        CTR1(KTR_MLD,
                            "%s: schedule response to SG query", __func__);
                        inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
                        inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                        V_current_state_timers_running6 = 1;
                }
        }

        return (retval);
}

/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
        struct in6_addr          src, dst;
        struct in6_ifaddr       *ia;
        struct in6_multi        *inm;
#ifdef KTR
        char                     ip6tbuf[INET6_ADDRSTRLEN];
#endif

        if (!mld_v1enable) {
                CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &mld->mld_addr),
                    ifp, ifp->if_xname);
                return (0);
        }

        if (ifp->if_flags & IFF_LOOPBACK)
                return (0);

        /*
         * MLDv1 reports must originate from a host's link-local address,
         * or the unspecified address (when booting).
         */
        src = ip6->ip6_src;
        in6_clearscope(&src);
        if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
                CTR3(KTR_MLD, "ignore v1 report src %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
                    ifp, ifp->if_xname);
                return (EINVAL);
        }

        /*
         * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
         * group, and must be directed to the group itself.
         */
        dst = ip6->ip6_dst;
        in6_clearscope(&dst);
        if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
            !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
                CTR3(KTR_MLD, "ignore v1 report dst %s on ifp %p(%s)",
                    ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
                    ifp, ifp->if_xname);
                return (EINVAL);
        }

        /*
         * Make sure we don't hear our own membership report, as fast
         * leave requires knowing that we are the only member of a
         * group. Assume we used the link-local address if available,
         * otherwise look for ::.
         *
         * XXX Note that scope ID comparison is needed for the address
         * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
         * performed for the on-wire address.
         */
        ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
        if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
            (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
                if (ia != NULL)
                        ifa_free(&ia->ia_ifa);
                return (0);
        }
        if (ia != NULL)
                ifa_free(&ia->ia_ifa);

        CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
            ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, ifp->if_xname);

        /*
         * Embed scope ID of receiving interface in MLD query for lookup
         * whilst we don't hold other locks (due to KAME locking lameness).
         */
        if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
                in6_setscope(&mld->mld_addr, ifp, NULL);

        IN6_MULTI_LOCK();
        MLD_LOCK();
        IF_ADDR_LOCK(ifp);

        /*
         * MLDv1 report suppression.
         * If we are a member of this group, and our membership should be
         * reported, and our group timer is pending or about to be reset,
         * stop our group timer by transitioning to the 'lazy' state.
         */
        inm = in6m_lookup_locked(ifp, &mld->mld_addr);
        if (inm != NULL) {
                struct mld_ifinfo *mli;

                mli = inm->in6m_mli;
                KASSERT(mli != NULL,
                    ("%s: no mli for ifp %p", __func__, ifp));

                /*
                 * If we are in MLDv2 host mode, do not allow the
                 * other host's MLDv1 report to suppress our reports.
                 */
                if (mli->mli_version == MLD_VERSION_2)
                        goto out_locked;

                inm->in6m_timer = 0;

                switch (inm->in6m_state) {
                case MLD_NOT_MEMBER:
                case MLD_SILENT_MEMBER:
                case MLD_SLEEPING_MEMBER:
                        break;
                case MLD_REPORTING_MEMBER:
                case MLD_IDLE_MEMBER:
                case MLD_AWAKENING_MEMBER:
                        CTR3(KTR_MLD,
                            "report suppressed for %s on ifp %p(%s)",
                            ip6_sprintf(ip6tbuf, &mld->mld_addr),
                            ifp, ifp->if_xname);
                        /* FALLTHROUGH */
                case MLD_LAZY_MEMBER:
                        inm->in6m_state = MLD_LAZY_MEMBER;
                        break;
                case MLD_G_QUERY_PENDING_MEMBER:
                case MLD_SG_QUERY_PENDING_MEMBER:
                case MLD_LEAVING_MEMBER:
                        break;
                }
        }

out_locked:
        MLD_UNLOCK();
        IF_ADDR_UNLOCK(ifp);
        IN6_MULTI_UNLOCK();

        /* XXX Clear embedded scope ID as userland won't expect it. */
        in6_clearscope(&mld->mld_addr);

        return (0);
}

/*
 * MLD input path.
 *
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
        struct ifnet    *ifp;
        struct ip6_hdr  *ip6;
        struct mld_hdr  *mld;
        int              mldlen;

        CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);

        ifp = m->m_pkthdr.rcvif;

        ip6 = mtod(m, struct ip6_hdr *);

        /* Pullup to appropriate size. */
        mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
        if (mld->mld_type == MLD_LISTENER_QUERY &&
            icmp6len >= sizeof(struct mldv2_query)) {
                mldlen = sizeof(struct mldv2_query);
        } else {
                mldlen = sizeof(struct mld_hdr);
        }
        IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
        if (mld == NULL) {
                ICMP6STAT_INC(icp6s_badlen);
                return (IPPROTO_DONE);
        }

        /*
         * Userland needs to see all of this traffic for implementing
         * the endpoint discovery portion of multicast routing.
         */
        switch (mld->mld_type) {
        case MLD_LISTENER_QUERY:
                icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
                if (icmp6len == sizeof(struct mld_hdr)) {
                        if (mld_v1_input_query(ifp, ip6, mld) != 0)
                                return (0);
                } else if (icmp6len >= sizeof(struct mldv2_query)) {
                        if (mld_v2_input_query(ifp, ip6, m, off,
                            icmp6len) != 0)
                                return (0);
                }
                break;
        case MLD_LISTENER_REPORT:
                icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
                if (mld_v1_input_report(ifp, ip6, mld) != 0)
                        return (0);
                break;
        case MLDV2_LISTENER_REPORT:
                icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
                break;
        case MLD_LISTENER_DONE:
                icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
                break;
        default:
                break;
        }

        return (0);
}
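
/*
 * Example (illustrative sketch; not the actual icmp6_input() code): how
 * a hypothetical caller would honour the return value contract
 * documented above. Not compiled.
 */
#if 0
static void
mld_input_example(struct mbuf *m, int off, int icmp6len)
{

        if (mld_input(m, off, icmp6len) == IPPROTO_DONE)
                m = NULL;       /* mld_input() freed the chain */
        if (m != NULL)
                m_freem(m);     /* caller still owns the chain */
}
#endif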
 1305 
 1306 /*
 1307  * Fast timeout handler (global).
 1308  * VIMAGE: Timeout handlers are expected to service all vimages.
 1309  */
 1310 void
 1311 mld_fasttimo(void)
 1312 {
 1313         VNET_ITERATOR_DECL(vnet_iter);
 1314 
 1315         VNET_LIST_RLOCK_NOSLEEP();
 1316         VNET_FOREACH(vnet_iter) {
 1317                 CURVNET_SET(vnet_iter);
 1318                 mld_fasttimo_vnet();
 1319                 CURVNET_RESTORE();
 1320         }
 1321         VNET_LIST_RUNLOCK_NOSLEEP();
 1322 }
 1323 
 1324 /*
 1325  * Fast timeout handler (per-vnet).
 1326  *
 1327  * VIMAGE: Assume caller has set up our curvnet.
 1328  */
 1329 static void
 1330 mld_fasttimo_vnet(void)
 1331 {
 1332         struct ifqueue           scq;   /* State-change packets */
 1333         struct ifqueue           qrq;   /* Query response packets */
 1334         struct ifnet            *ifp;
 1335         struct mld_ifinfo       *mli;
 1336         struct ifmultiaddr      *ifma;
 1337         struct in6_multi        *inm, *tinm;
 1338         int                      uri_fasthz;
 1339 
 1340         uri_fasthz = 0;
 1341 
 1342         /*
 1343          * Quick check to see if any work needs to be done, in order to
 1344          * minimize the overhead of fasttimo processing.
 1345          * SMPng: XXX Unlocked reads.
 1346          */
 1347         if (!V_current_state_timers_running6 &&
 1348             !V_interface_timers_running6 &&
 1349             !V_state_change_timers_running6)
 1350                 return;
 1351 
 1352         IN6_MULTI_LOCK();
 1353         MLD_LOCK();
 1354 
 1355         /*
 1356          * MLDv2 General Query response timer processing.
 1357          */
 1358         if (V_interface_timers_running6) {
 1359                 CTR1(KTR_MLD, "%s: interface timers running", __func__);
 1360 
 1361                 V_interface_timers_running6 = 0;
 1362                 LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1363                         if (mli->mli_v2_timer == 0) {
 1364                                 /* Do nothing. */
 1365                         } else if (--mli->mli_v2_timer == 0) {
 1366                                 mld_v2_dispatch_general_query(mli);
 1367                         } else {
 1368                                 V_interface_timers_running6 = 1;
 1369                         }
 1370                 }
 1371         }
 1372 
 1373         if (!V_current_state_timers_running6 &&
 1374             !V_state_change_timers_running6)
 1375                 goto out_locked;
 1376 
 1377         V_current_state_timers_running6 = 0;
 1378         V_state_change_timers_running6 = 0;
 1379 
 1380         CTR1(KTR_MLD, "%s: state change timers running", __func__);
 1381 
 1382         /*
 1383          * MLD host report and state-change timer processing.
 1384          * Note: Processing a v2 group timer may remove a node.
 1385          */
 1386         LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1387                 ifp = mli->mli_ifp;
 1388 
 1389                 if (mli->mli_version == MLD_VERSION_2) {
 1390                         uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
 1391                             PR_FASTHZ);
 1392 
 1393                         memset(&qrq, 0, sizeof(struct ifqueue));
 1394                         IFQ_SET_MAXLEN(&qrq, MLD_MAX_G_GS_PACKETS);
 1395 
 1396                         memset(&scq, 0, sizeof(struct ifqueue));
 1397                         IFQ_SET_MAXLEN(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
 1398                 }
 1399 
 1400                 IF_ADDR_LOCK(ifp);
 1401                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1402                         if (ifma->ifma_addr->sa_family != AF_INET6 ||
 1403                             ifma->ifma_protospec == NULL)
 1404                                 continue;
 1405                         inm = (struct in6_multi *)ifma->ifma_protospec;
 1406                         switch (mli->mli_version) {
 1407                         case MLD_VERSION_1:
 1408                                 mld_v1_process_group_timer(mli, inm);
 1409                                 break;
 1410                         case MLD_VERSION_2:
 1411                                 mld_v2_process_group_timers(mli, &qrq,
 1412                                     &scq, inm, uri_fasthz);
 1413                                 break;
 1414                         }
 1415                 }
 1416                 IF_ADDR_UNLOCK(ifp);
 1417 
 1418                 switch (mli->mli_version) {
 1419                 case MLD_VERSION_1:
 1420                         /*
 1421                          * Transmit reports for this lifecycle.  This
 1422                          * is done while not holding IF_ADDR_LOCK,
 1423                          * since the transmit path calls
 1424                          * in6ifa_ifpforlinklocal(), which takes
 1425                          * IF_ADDR_LOCK internally, as well as
 1426                          * ip6_output() to transmit the packet.
 1427                          */
 1428                         SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
 1429                             in6m_nrele, tinm) {
 1430                                 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
 1431                                     in6m_nrele);
 1432                                 (void)mld_v1_transmit_report(inm,
 1433                                     MLD_LISTENER_REPORT);
 1434                         }
 1435                         break;
 1436                 case MLD_VERSION_2:
 1437                         mld_dispatch_queue(&qrq, 0);
 1438                         mld_dispatch_queue(&scq, 0);
 1439 
 1440                         /*
 1441                          * Free the in6_multi reference(s) for
 1442                          * this lifecycle.
 1443                          */
 1444                         SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
 1445                             in6m_nrele, tinm) {
 1446                                 SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
 1447                                     in6m_nrele);
 1448                                 in6m_release_locked(inm);
 1449                         }
 1450                         break;
 1451                 }
 1452         }
 1453 
 1454 out_locked:
 1455         MLD_UNLOCK();
 1456         IN6_MULTI_UNLOCK();
 1457 }
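
/*
 * Editorial sketch (not part of the original source): the uri_fasthz
 * value computed above converts the Unsolicited Report Interval into
 * fast-timeout ticks.  Assuming PR_FASTHZ == 5 (fasttimo fires every
 * ~200ms) and MLD_RANDOM_DELAY(X) expanding to (arc4random() % (X) + 1),
 * an URI (mli_uri) of 3 seconds gives:
 *
 *      uri_fasthz = MLD_RANDOM_DELAY(3 * 5);   (uniform in [1, 15] ticks)
 *
 * i.e. at most ~3 seconds between MLDv2 state-change retransmissions.
 */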
 1458 
 1459 /*
 1460  * Update host report group timer.
 1461  * Will update the global pending timer flags.
 1462  */
 1463 static void
 1464 mld_v1_process_group_timer(struct mld_ifinfo *mli, struct in6_multi *inm)
 1465 {
 1466         int report_timer_expired;
 1467 
 1468         IN6_MULTI_LOCK_ASSERT();
 1469         MLD_LOCK_ASSERT();
 1470 
 1471         if (inm->in6m_timer == 0) {
 1472                 report_timer_expired = 0;
 1473         } else if (--inm->in6m_timer == 0) {
 1474                 report_timer_expired = 1;
 1475         } else {
 1476                 V_current_state_timers_running6 = 1;
 1477                 return;
 1478         }
 1479 
 1480         switch (inm->in6m_state) {
 1481         case MLD_NOT_MEMBER:
 1482         case MLD_SILENT_MEMBER:
 1483         case MLD_IDLE_MEMBER:
 1484         case MLD_LAZY_MEMBER:
 1485         case MLD_SLEEPING_MEMBER:
 1486         case MLD_AWAKENING_MEMBER:
 1487                 break;
 1488         case MLD_REPORTING_MEMBER:
 1489                 if (report_timer_expired) {
 1490                         inm->in6m_state = MLD_IDLE_MEMBER;
 1491                         SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
 1492                             in6m_nrele);
 1493                 }
 1494                 break;
 1495         case MLD_G_QUERY_PENDING_MEMBER:
 1496         case MLD_SG_QUERY_PENDING_MEMBER:
 1497         case MLD_LEAVING_MEMBER:
 1498                 break;
 1499         }
 1500 }
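
/*
 * Editorial sketch: the three-way countdown in the function above is
 * the timer idiom used throughout this file.  Generalized (the names
 * here are hypothetical):
 *
 *      if (timer == 0)
 *              expired = 0;            (idle; nothing to do)
 *      else if (--timer == 0)
 *              expired = 1;            (fired on this tick)
 *      else
 *              running_flag = 1;       (still counting; keep fasttimo armed)
 *
 * The global running flags are what allow mld_fasttimo_vnet() to bail
 * out early when no timers are pending.
 */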
 1501 
 1502 /*
 1503  * Update a group's timers for MLDv2.
 1504  * Will update the global pending timer flags.
 1505  * Note: Unlocked read from mli.
 1506  */
 1507 static void
 1508 mld_v2_process_group_timers(struct mld_ifinfo *mli,
 1509     struct ifqueue *qrq, struct ifqueue *scq,
 1510     struct in6_multi *inm, const int uri_fasthz)
 1511 {
 1512         int query_response_timer_expired;
 1513         int state_change_retransmit_timer_expired;
 1514 #ifdef KTR
 1515         char ip6tbuf[INET6_ADDRSTRLEN];
 1516 #endif
 1517 
 1518         IN6_MULTI_LOCK_ASSERT();
 1519         MLD_LOCK_ASSERT();
 1520 
 1521         query_response_timer_expired = 0;
 1522         state_change_retransmit_timer_expired = 0;
 1523 
 1524         /*
 1525          * During a transition from compatibility mode back to MLDv2,
 1526          * a group record in REPORTING state may still have its group
 1527          * timer active. This is a no-op in this function; it is easier
 1528          * to deal with it here than to complicate the slow-timeout path.
 1529          */
 1530         if (inm->in6m_timer == 0) {
 1531                 query_response_timer_expired = 0;
 1532         } else if (--inm->in6m_timer == 0) {
 1533                 query_response_timer_expired = 1;
 1534         } else {
 1535                 V_current_state_timers_running6 = 1;
 1536         }
 1537 
 1538         if (inm->in6m_sctimer == 0) {
 1539                 state_change_retransmit_timer_expired = 0;
 1540         } else if (--inm->in6m_sctimer == 0) {
 1541                 state_change_retransmit_timer_expired = 1;
 1542         } else {
 1543                 V_state_change_timers_running6 = 1;
 1544         }
 1545 
 1546         /* We are in fasttimo, so be quick about it. */
 1547         if (!state_change_retransmit_timer_expired &&
 1548             !query_response_timer_expired)
 1549                 return;
 1550 
 1551         switch (inm->in6m_state) {
 1552         case MLD_NOT_MEMBER:
 1553         case MLD_SILENT_MEMBER:
 1554         case MLD_SLEEPING_MEMBER:
 1555         case MLD_LAZY_MEMBER:
 1556         case MLD_AWAKENING_MEMBER:
 1557         case MLD_IDLE_MEMBER:
 1558                 break;
 1559         case MLD_G_QUERY_PENDING_MEMBER:
 1560         case MLD_SG_QUERY_PENDING_MEMBER:
 1561                 /*
 1562                  * Respond to a previously pending Group-Specific
 1563                  * or Group-and-Source-Specific query by enqueueing
 1564                  * the appropriate Current-State report for
 1565                  * immediate transmission.
 1566                  */
 1567                 if (query_response_timer_expired) {
 1568                         int retval;
 1569 
 1570                         retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
 1571                             (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
 1572                             0);
 1573                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 1574                             __func__, retval);
 1575                         inm->in6m_state = MLD_REPORTING_MEMBER;
 1576                         in6m_clear_recorded(inm);
 1577                 }
 1578                 /* FALLTHROUGH */
 1579         case MLD_REPORTING_MEMBER:
 1580         case MLD_LEAVING_MEMBER:
 1581                 if (state_change_retransmit_timer_expired) {
 1582                         /*
 1583                          * State-change retransmission timer fired.
 1584                          * If there are any further pending retransmissions,
 1585                          * set the global pending state-change flag, and
 1586                          * reset the timer.
 1587                          */
 1588                         if (--inm->in6m_scrv > 0) {
 1589                                 inm->in6m_sctimer = uri_fasthz;
 1590                                 V_state_change_timers_running6 = 1;
 1591                         }
 1592                         /*
 1593                          * Retransmit the previously computed state-change
 1594                          * report. If there are no further pending
 1595                          * retransmissions, the mbuf queue will be consumed.
 1596                          * Update T0 state to T1 as we have now sent
 1597                          * a state-change.
 1598                          */
 1599                         (void)mld_v2_merge_state_changes(inm, scq);
 1600 
 1601                         in6m_commit(inm);
 1602                         CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 1603                             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 1604                             inm->in6m_ifp->if_xname);
 1605 
 1606                         /*
 1607                          * If we are leaving the group for good, make sure
 1608                          * we release MLD's reference to it.
 1609                          * This release must be deferred using a SLIST,
 1610                          * as we are called from a loop which traverses
 1611                          * the ifnet's if_multiaddrs TAILQ.
 1612                          */
 1613                         if (inm->in6m_state == MLD_LEAVING_MEMBER &&
 1614                             inm->in6m_scrv == 0) {
 1615                                 inm->in6m_state = MLD_NOT_MEMBER;
 1616                                 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
 1617                                     inm, in6m_nrele);
 1618                         }
 1619                 }
 1620                 break;
 1621         }
 1622 }
 1623 
 1624 /*
 1625  * Switch to a different version on the given interface,
 1626  * as per Section 9.12 of RFC 3810.
 1627  */
 1628 static void
 1629 mld_set_version(struct mld_ifinfo *mli, const int version)
 1630 {
 1631         int old_version_timer;
 1632 
 1633         MLD_LOCK_ASSERT();
 1634 
 1635         CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
 1636             version, mli->mli_ifp, mli->mli_ifp->if_xname);
 1637 
 1638         if (version == MLD_VERSION_1) {
 1639                 /*
 1640                  * Compute the "Older Version Querier Present" timer as per
 1641                  * Section 9.12.
 1642          * Section 9.12 of RFC 3810.
 1643                 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
 1644                 old_version_timer *= PR_SLOWHZ;
 1645                 mli->mli_v1_timer = old_version_timer;
 1646         }
 1647 
 1648         if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
 1649                 mli->mli_version = MLD_VERSION_1;
 1650                 mld_v2_cancel_link_timers(mli);
 1651         }
 1652 }
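
/*
 * Editorial example: with the RFC 3810 defaults -- Robustness Variable
 * (mli_rv) 2, Query Interval (mli_qi) 125s, Query Response Interval
 * (mli_qri) 10s -- and assuming PR_SLOWHZ == 2 slow ticks per second:
 *
 *      old_version_timer = ((2 * 125) + 10) * 2;       (260s -> 520 ticks)
 *
 * so the link remains in MLDv1 compatibility mode for ~260 seconds
 * after the last MLDv1 query was heard.
 */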
 1653 
 1654 /*
 1655  * Cancel pending MLDv2 timers for the given link and all groups
 1656  * joined on it; state-change, general-query, and group-query timers.
 1657  */
 1658 static void
 1659 mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
 1660 {
 1661         struct ifmultiaddr      *ifma;
 1662         struct ifnet            *ifp;
 1663         struct in6_multi        *inm, *tinm;
 1664 
 1665         CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
 1666             mli->mli_ifp, mli->mli_ifp->if_xname);
 1667 
 1668         IN6_MULTI_LOCK_ASSERT();
 1669         MLD_LOCK_ASSERT();
 1670 
 1671         /*
 1672          * Fast-track this potentially expensive operation
 1673          * by checking all the global 'timer pending' flags.
 1674          */
 1675         if (!V_interface_timers_running6 &&
 1676             !V_state_change_timers_running6 &&
 1677             !V_current_state_timers_running6)
 1678                 return;
 1679 
 1680         mli->mli_v2_timer = 0;
 1681 
 1682         ifp = mli->mli_ifp;
 1683 
 1684         IF_ADDR_LOCK(ifp);
 1685         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1686                 if (ifma->ifma_addr->sa_family != AF_INET6 || ifma->ifma_protospec == NULL)
 1687                         continue;
 1688                 inm = (struct in6_multi *)ifma->ifma_protospec;
 1689                 switch (inm->in6m_state) {
 1690                 case MLD_NOT_MEMBER:
 1691                 case MLD_SILENT_MEMBER:
 1692                 case MLD_IDLE_MEMBER:
 1693                 case MLD_LAZY_MEMBER:
 1694                 case MLD_SLEEPING_MEMBER:
 1695                 case MLD_AWAKENING_MEMBER:
 1696                         break;
 1697                 case MLD_LEAVING_MEMBER:
 1698                         /*
 1699                          * If we are leaving the group and switching
 1700                          * version, we need to release the final
 1701                          * reference held for issuing the INCLUDE {}.
 1702                          */
 1703                         SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
 1704                             in6m_nrele);
 1705                         /* FALLTHROUGH */
 1706                 case MLD_G_QUERY_PENDING_MEMBER:
 1707                 case MLD_SG_QUERY_PENDING_MEMBER:
 1708                         in6m_clear_recorded(inm);
 1709                         /* FALLTHROUGH */
 1710                 case MLD_REPORTING_MEMBER:
 1711                         inm->in6m_sctimer = 0;
 1712                         inm->in6m_timer = 0;
 1713                         inm->in6m_state = MLD_REPORTING_MEMBER;
 1714                         /*
 1715                          * Free any pending MLDv2 state-change records.
 1716                          */
 1717                         _IF_DRAIN(&inm->in6m_scq);
 1718                         break;
 1719                 }
 1720         }
 1721         IF_ADDR_UNLOCK(ifp);
 1722         SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
 1723                 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
 1724                 in6m_release_locked(inm);
 1725         }
 1726 }
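
/*
 * Editorial sketch of the deferred-release idiom used above (and in
 * mld_fasttimo_vnet()): releases are queued on mli_relinmhead during
 * the IF_ADDR_LOCK-protected walk and drained only afterwards, since
 * dropping the last in6_multi reference can tear down the underlying
 * ifmultiaddr being iterated over:
 *
 *      SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele);
 *      ...                                     (inside the locked walk)
 *      SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
 *              SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
 *              in6m_release_locked(inm);       (after IF_ADDR_UNLOCK)
 *      }
 */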
 1727 
 1728 /*
 1729  * Global slowtimo handler.
 1730  * VIMAGE: Timeout handlers are expected to service all vimages.
 1731  */
 1732 void
 1733 mld_slowtimo(void)
 1734 {
 1735         VNET_ITERATOR_DECL(vnet_iter);
 1736 
 1737         VNET_LIST_RLOCK_NOSLEEP();
 1738         VNET_FOREACH(vnet_iter) {
 1739                 CURVNET_SET(vnet_iter);
 1740                 mld_slowtimo_vnet();
 1741                 CURVNET_RESTORE();
 1742         }
 1743         VNET_LIST_RUNLOCK_NOSLEEP();
 1744 }
 1745 
 1746 /*
 1747  * Per-vnet slowtimo handler.
 1748  */
 1749 static void
 1750 mld_slowtimo_vnet(void)
 1751 {
 1752         struct mld_ifinfo *mli;
 1753 
 1754         MLD_LOCK();
 1755 
 1756         LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1757                 mld_v1_process_querier_timers(mli);
 1758         }
 1759 
 1760         MLD_UNLOCK();
 1761 }
 1762 
 1763 /*
 1764  * Update the Older Version Querier Present timers for a link.
 1765  * See Section 9.12 of RFC 3810.
 1766  */
 1767 static void
 1768 mld_v1_process_querier_timers(struct mld_ifinfo *mli)
 1769 {
 1770 
 1771         MLD_LOCK_ASSERT();
 1772 
 1773         if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
 1774                 /*
 1775                  * MLDv1 Querier Present timer expired; revert to MLDv2.
 1776                  */
 1777                 CTR5(KTR_MLD,
 1778                     "%s: transition from v%d -> v%d on %p(%s)",
 1779                     __func__, mli->mli_version, MLD_VERSION_2,
 1780                     mli->mli_ifp, mli->mli_ifp->if_xname);
 1781                 mli->mli_version = MLD_VERSION_2;
 1782         }
 1783 }
 1784 
 1785 /*
 1786  * Transmit an MLDv1 report immediately.
 1787  */
 1788 static int
 1789 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
 1790 {
 1791         struct ifnet            *ifp;
 1792         struct in6_ifaddr       *ia;
 1793         struct ip6_hdr          *ip6;
 1794         struct mbuf             *mh, *md;
 1795         struct mld_hdr          *mld;
 1796 
 1797         IN6_MULTI_LOCK_ASSERT();
 1798         MLD_LOCK_ASSERT();
 1799 
 1800         ifp = in6m->in6m_ifp;
 1801         ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
 1802         /* ia may be NULL if link-local address is tentative. */
 1803 
 1804         MGETHDR(mh, M_DONTWAIT, MT_HEADER);
 1805         if (mh == NULL) {
 1806                 if (ia != NULL)
 1807                         ifa_free(&ia->ia_ifa);
 1808                 return (ENOMEM);
 1809         }
 1810         MGET(md, M_DONTWAIT, MT_DATA);
 1811         if (md == NULL) {
 1812                 m_free(mh);
 1813                 if (ia != NULL)
 1814                         ifa_free(&ia->ia_ifa);
 1815                 return (ENOMEM);
 1816         }
 1817         mh->m_next = md;
 1818 
 1819         /*
 1820          * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
 1821          * that ether_output() does not need to allocate another mbuf
 1822          * for the header in the most common case.
 1823          */
 1824         MH_ALIGN(mh, sizeof(struct ip6_hdr));
 1825         mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
 1826         mh->m_len = sizeof(struct ip6_hdr);
 1827 
 1828         ip6 = mtod(mh, struct ip6_hdr *);
 1829         ip6->ip6_flow = 0;
 1830         ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
 1831         ip6->ip6_vfc |= IPV6_VERSION;
 1832         ip6->ip6_nxt = IPPROTO_ICMPV6;
 1833         ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
 1834         ip6->ip6_dst = in6m->in6m_addr;
 1835 
 1836         md->m_len = sizeof(struct mld_hdr);
 1837         mld = mtod(md, struct mld_hdr *);
 1838         mld->mld_type = type;
 1839         mld->mld_code = 0;
 1840         mld->mld_cksum = 0;
 1841         mld->mld_maxdelay = 0;
 1842         mld->mld_reserved = 0;
 1843         mld->mld_addr = in6m->in6m_addr;
 1844         in6_clearscope(&mld->mld_addr);
 1845         mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
 1846             sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
 1847 
 1848         mld_save_context(mh, ifp);
 1849         mh->m_flags |= M_MLDV1;
 1850 
 1851         mld_dispatch_packet(mh);
 1852 
 1853         if (ia != NULL)
 1854                 ifa_free(&ia->ia_ifa);
 1855         return (0);
 1856 }
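
/*
 * Editorial sketch of the frame built above: sizeof(struct ip6_hdr)
 * is 40 and sizeof(struct mld_hdr) is 24 (an 8-byte ICMPv6 header
 * carrying type/code/cksum/maxdelay plus a 16-byte group address), so
 * an MLDv1 report or done message is a fixed 64-byte IPv6 packet:
 *
 *      mh: [ ip6_hdr (40) ]    src = link-local or ::, dst = group
 *      md: [ mld_hdr (24) ]    MLD_LISTENER_REPORT or MLD_LISTENER_DONE
 *
 * in6_cksum() is given the 40-byte offset and 24-byte length so the
 * ICMPv6 checksum covers the pseudo-header plus the MLD header.
 */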
 1857 
 1858 /*
 1859  * Process a state change from the upper layer for the given IPv6 group.
 1860  *
 1861  * Each socket holds a reference on the in6_multi in its own ip6_moptions.
 1862  * The socket layer will have made the necessary updates to the group
 1863  * state; it is now up to MLD to issue a state change report if there
 1864  * has been any change between T0 (when the last state-change was issued)
 1865  * and T1 (now).
 1866  *
 1867  * We use the MLDv2 state machine at group level. The MLD module,
 1868  * however, decides which MLD protocol version to speak.
 1869  * A state change *from* INCLUDE {} always means an initial join.
 1870  * A state change *to* INCLUDE {} always means a final leave.
 1871  *
 1872  * If delay is non-zero, and the state change is an initial multicast
 1873  * join, the state change report will be delayed by 'delay' ticks
 1874  * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
 1875  * the initial MLDv2 state change report will be delayed by whichever
 1876  * is sooner: a pending state-change timer or 'delay' itself.
 1877  *
 1878  * VIMAGE: curvnet should have been set by caller, as this routine
 1879  * is called from the socket option handlers.
 1880  */
 1881 int
 1882 mld_change_state(struct in6_multi *inm, const int delay)
 1883 {
 1884         struct mld_ifinfo *mli;
 1885         struct ifnet *ifp;
 1886         int error;
 1887 
 1888         IN6_MULTI_LOCK_ASSERT();
 1889 
 1890         error = 0;
 1891 
 1892         /*
 1893          * Try to detect if the upper layer just asked us to change state
 1894          * for an interface which has now gone away.
 1895          */
 1896         KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
 1897         ifp = inm->in6m_ifma->ifma_ifp;
 1898         if (ifp != NULL) {
 1899                 /*
 1900                  * Sanity check that netinet6's notion of ifp is the
 1901                  * same as net's.
 1902                  */
 1903                 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
 1904         }
 1905 
 1906         MLD_LOCK();
 1907 
 1908         mli = MLD_IFINFO(ifp);
 1909         KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
 1910 
 1911         /*
 1912          * If we detect a state transition to or from MCAST_UNDEFINED
 1913          * for this group, then we are starting or finishing an MLD
 1914          * life cycle for this group.
 1915          */
 1916         if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
 1917                 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
 1918                     inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
 1919                 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
 1920                         CTR1(KTR_MLD, "%s: initial join", __func__);
 1921                         error = mld_initial_join(inm, mli, delay);
 1922                         goto out_locked;
 1923                 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
 1924                         CTR1(KTR_MLD, "%s: final leave", __func__);
 1925                         mld_final_leave(inm, mli);
 1926                         goto out_locked;
 1927                 }
 1928         } else {
 1929                 CTR1(KTR_MLD, "%s: filter set change", __func__);
 1930         }
 1931 
 1932         error = mld_handle_state_change(inm, mli);
 1933 
 1934 out_locked:
 1935         MLD_UNLOCK();
 1936         return (error);
 1937 }
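
/*
 * Editorial illustration: a hypothetical caller in the socket-option
 * path.  Per the comment above, the T1 filter state must already have
 * been merged into inm->in6m_st[1] and IN6_MULTI_LOCK must be held;
 * mld_change_state() then classifies the T0->T1 edge itself.
 */
#if 0   /* illustration only */
        IN6_MULTI_LOCK();
        /* ... socket layer merges filter state into inm->in6m_st[1] ... */
        error = mld_change_state(inm, 0);       /* no extra report delay */
        IN6_MULTI_UNLOCK();
#endif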
 1938 
 1939 /*
 1940  * Perform the initial join for an MLD group.
 1941  *
 1942  * When joining a group:
 1943  *  If the group should have its MLD traffic suppressed, do nothing.
 1944  *  MLDv1 starts sending MLDv1 host membership reports.
 1945  *  MLDv2 will schedule an MLDv2 state-change report containing the
 1946  *  initial state of the membership.
 1947  *
 1948  * If the delay argument is non-zero, then we must delay sending the
 1949  * initial state change for delay ticks (in units of PR_FASTHZ).
 1950  */
 1951 static int
 1952 mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
 1953     const int delay)
 1954 {
 1955         struct ifnet            *ifp;
 1956         struct ifqueue          *ifq;
 1957         int                      error, retval, syncstates;
 1958         int                      odelay;
 1959 #ifdef KTR
 1960         char                     ip6tbuf[INET6_ADDRSTRLEN];
 1961 #endif
 1962 
 1963         CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
 1964             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 1965             inm->in6m_ifp, inm->in6m_ifp->if_xname);
 1966 
 1967         error = 0;
 1968         syncstates = 1;
 1969 
 1970         ifp = inm->in6m_ifp;
 1971 
 1972         IN6_MULTI_LOCK_ASSERT();
 1973         MLD_LOCK_ASSERT();
 1974 
 1975         KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
 1976 
 1977         /*
 1978          * Groups joined on loopback or marked as 'not reported'
 1979          * enter the MLD_SILENT_MEMBER state and
 1980          * are never reported in any protocol exchanges.
 1981          * All other groups enter the appropriate state machine
 1982          * for the version in use on this link.
 1983          * A link marked as MLIF_SILENT causes MLD to be completely
 1984          * disabled for the link.
 1985          */
 1986         if ((ifp->if_flags & IFF_LOOPBACK) ||
 1987             (mli->mli_flags & MLIF_SILENT) ||
 1988             !mld_is_addr_reported(&inm->in6m_addr)) {
 1989                 CTR1(KTR_MLD,
 1990 "%s: not kicking state machine for silent group", __func__);
 1991                 inm->in6m_state = MLD_SILENT_MEMBER;
 1992                 inm->in6m_timer = 0;
 1993         } else {
 1994                 /*
 1995                  * Deal with overlapping in6_multi lifecycle.
 1996                  * If this group was LEAVING, then make sure
 1997                  * we drop the reference we picked up to keep the
 1998                  * group around for the final INCLUDE {} enqueue.
 1999                  */
 2000                 if (mli->mli_version == MLD_VERSION_2 &&
 2001                     inm->in6m_state == MLD_LEAVING_MEMBER)
 2002                         in6m_release_locked(inm);
 2003 
 2004                 inm->in6m_state = MLD_REPORTING_MEMBER;
 2005 
 2006                 switch (mli->mli_version) {
 2007                 case MLD_VERSION_1:
 2008                         /*
 2009                          * If a delay was provided, only use it if
 2010                          * it is greater than the delay normally
 2011                          * used for an MLDv1 state change report,
 2012                          * and delay sending the initial MLDv1 report
 2013                          * by not transitioning to the IDLE state.
 2014                          */
 2015                         odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
 2016                         if (delay) {
 2017                                 inm->in6m_timer = max(delay, odelay);
 2018                                 V_current_state_timers_running6 = 1;
 2019                         } else {
 2020                                 inm->in6m_state = MLD_IDLE_MEMBER;
 2021                                 error = mld_v1_transmit_report(inm,
 2022                                      MLD_LISTENER_REPORT);
 2023                                 if (error == 0) {
 2024                                         inm->in6m_timer = odelay;
 2025                                         V_current_state_timers_running6 = 1;
 2026                                 }
 2027                         }
 2028                         break;
 2029 
 2030                 case MLD_VERSION_2:
 2031                         /*
 2032                          * Defer update of T0 to T1, until the first copy
 2033                          * of the state change has been transmitted.
 2034                          */
 2035                         syncstates = 0;
 2036 
 2037                         /*
 2038                          * Immediately enqueue a State-Change Report for
 2039                          * this interface, freeing any previous reports.
 2040                          * Don't kick the timers if there is nothing to do,
 2041                          * or if an error occurred.
 2042                          */
 2043                         ifq = &inm->in6m_scq;
 2044                         _IF_DRAIN(ifq);
 2045                         retval = mld_v2_enqueue_group_record(ifq, inm, 1,
 2046                             0, 0, (mli->mli_flags & MLIF_USEALLOW));
 2047                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 2048                             __func__, retval);
 2049                         if (retval <= 0) {
 2050                                 error = retval * -1;
 2051                                 break;
 2052                         }
 2053 
 2054                         /*
 2055                          * Schedule transmission of pending state-change
 2056                          * report up to RV times for this link. The timer
 2057                          * will fire at the next mld_fasttimo (~200ms),
 2058                          * giving us an opportunity to merge the reports.
 2059                          *
 2060                          * If a delay was provided to this function, only
 2061                          * use this delay if sooner than the existing one.
 2062                          */
 2063                         KASSERT(mli->mli_rv > 1,
 2064                            ("%s: invalid robustness %d", __func__,
 2065                             mli->mli_rv));
 2066                         inm->in6m_scrv = mli->mli_rv;
 2067                         if (delay) {
 2068                                 if (inm->in6m_sctimer > 1) {
 2069                                         inm->in6m_sctimer =
 2070                                             min(inm->in6m_sctimer, delay);
 2071                                 } else
 2072                                         inm->in6m_sctimer = delay;
 2073                         } else
 2074                                 inm->in6m_sctimer = 1;
 2075                         V_state_change_timers_running6 = 1;
 2076 
 2077                         error = 0;
 2078                         break;
 2079                 }
 2080         }
 2081 
 2082         /*
 2083          * Only update the T0 state if state change is atomic,
 2084          * i.e. we don't need to wait for a timer to fire before we
 2085          * can consider the state change to have been communicated.
 2086          */
 2087         if (syncstates) {
 2088                 in6m_commit(inm);
 2089                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2090                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2091                     inm->in6m_ifp->if_xname);
 2092         }
 2093 
 2094         return (error);
 2095 }
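
/*
 * Editorial example of the delay merging above: if a state-change
 * retransmission is already pending in, say, 3 fast ticks and the
 * caller requests a 5-tick delay, the earlier deadline wins:
 *
 *      inm->in6m_sctimer = min(3, 5);          (remains 3)
 *
 * With no timer pending, the requested delay is taken as-is; with no
 * delay at all, the report is scheduled for the next fasttimo tick
 * (in6m_sctimer = 1).
 */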
 2096 
 2097 /*
 2098  * Issue an intermediate state change during the life-cycle.
 2099  */
 2100 static int
 2101 mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
 2102 {
 2103         struct ifnet            *ifp;
 2104         int                      retval;
 2105 #ifdef KTR
 2106         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2107 #endif
 2108 
 2109         CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
 2110             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2111             inm->in6m_ifp, inm->in6m_ifp->if_xname);
 2112 
 2113         ifp = inm->in6m_ifp;
 2114 
 2115         IN6_MULTI_LOCK_ASSERT();
 2116         MLD_LOCK_ASSERT();
 2117 
 2118         KASSERT(mli && mli->mli_ifp == ifp,
 2119             ("%s: inconsistent ifp", __func__));
 2120 
 2121         if ((ifp->if_flags & IFF_LOOPBACK) ||
 2122             (mli->mli_flags & MLIF_SILENT) ||
 2123             !mld_is_addr_reported(&inm->in6m_addr) ||
 2124             (mli->mli_version != MLD_VERSION_2)) {
 2125                 if (!mld_is_addr_reported(&inm->in6m_addr)) {
 2126                         CTR1(KTR_MLD,
 2127 "%s: not kicking state machine for silent group", __func__);
 2128                 }
 2129                 CTR1(KTR_MLD, "%s: nothing to do", __func__);
 2130                 in6m_commit(inm);
 2131                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2132                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2133                     inm->in6m_ifp->if_xname);
 2134                 return (0);
 2135         }
 2136 
 2137         _IF_DRAIN(&inm->in6m_scq);
 2138 
 2139         retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
 2140             (mli->mli_flags & MLIF_USEALLOW));
 2141         CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
 2142         if (retval <= 0)
 2143                 return (-retval);
 2144 
 2145         /*
 2146          * If record(s) were enqueued, start the state-change
 2147          * report timer for this group.
 2148          */
 2149         inm->in6m_scrv = mli->mli_rv;
 2150         inm->in6m_sctimer = 1;
 2151         V_state_change_timers_running6 = 1;
 2152 
 2153         return (0);
 2154 }
 2155 
 2156 /*
 2157  * Perform the final leave for a multicast address.
 2158  *
 2159  * When leaving a group:
 2160  *  MLDv1 sends a DONE message, if and only if we are the reporter.
 2161  *  MLDv2 enqueues a state-change report containing a transition
 2162  *  to INCLUDE {} for immediate transmission.
 2163  */
 2164 static void
 2165 mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
 2166 {
 2167         int syncstates;
 2168 #ifdef KTR
 2169         char ip6tbuf[INET6_ADDRSTRLEN];
 2170 #endif
 2171 
 2172         syncstates = 1;
 2173 
 2174         CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
 2175             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2176             inm->in6m_ifp, inm->in6m_ifp->if_xname);
 2177 
 2178         IN6_MULTI_LOCK_ASSERT();
 2179         MLD_LOCK_ASSERT();
 2180 
 2181         switch (inm->in6m_state) {
 2182         case MLD_NOT_MEMBER:
 2183         case MLD_SILENT_MEMBER:
 2184         case MLD_LEAVING_MEMBER:
 2185                 /* Already leaving or left; do nothing. */
 2186                 CTR1(KTR_MLD,
 2187 "%s: not kicking state machine for silent group", __func__);
 2188                 break;
 2189         case MLD_REPORTING_MEMBER:
 2190         case MLD_IDLE_MEMBER:
 2191         case MLD_G_QUERY_PENDING_MEMBER:
 2192         case MLD_SG_QUERY_PENDING_MEMBER:
 2193                 if (mli->mli_version == MLD_VERSION_1) {
 2194 #ifdef INVARIANTS
 2195                         if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
 2196                             inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
 2197                                 panic("%s: MLDv2 state reached, not MLDv2 mode",
 2198                                     __func__);
 2199 #endif
 2200                         mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
 2201                         inm->in6m_state = MLD_NOT_MEMBER;
 2202                 } else if (mli->mli_version == MLD_VERSION_2) {
 2203                         /*
 2204                          * Stop group timer and all pending reports.
 2205                          * Immediately enqueue a state-change report
 2206                          * TO_IN {} to be sent on the next fast timeout,
 2207                          * giving us an opportunity to merge reports.
 2208                          */
 2209                         _IF_DRAIN(&inm->in6m_scq);
 2210                         inm->in6m_timer = 0;
 2211                         inm->in6m_scrv = mli->mli_rv;
 2212                         CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
 2213                             "pending retransmissions.", __func__,
 2214                             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2215                             inm->in6m_ifp->if_xname, inm->in6m_scrv);
 2216                         if (inm->in6m_scrv == 0) {
 2217                                 inm->in6m_state = MLD_NOT_MEMBER;
 2218                                 inm->in6m_sctimer = 0;
 2219                         } else {
 2220                                 int retval;
 2221 
 2222                                 in6m_acquire_locked(inm);
 2223 
 2224                                 retval = mld_v2_enqueue_group_record(
 2225                                     &inm->in6m_scq, inm, 1, 0, 0,
 2226                                     (mli->mli_flags & MLIF_USEALLOW));
 2227                                 KASSERT(retval != 0,
 2228                                     ("%s: enqueue record = %d", __func__,
 2229                                      retval));
 2230 
 2231                                 inm->in6m_state = MLD_LEAVING_MEMBER;
 2232                                 inm->in6m_sctimer = 1;
 2233                                 V_state_change_timers_running6 = 1;
 2234                                 syncstates = 0;
 2235                         }
 2236                         break;
 2237                 }
 2238                 break;
 2239         case MLD_LAZY_MEMBER:
 2240         case MLD_SLEEPING_MEMBER:
 2241         case MLD_AWAKENING_MEMBER:
 2242                 /* Our reports are suppressed; do nothing. */
 2243                 break;
 2244         }
 2245 
 2246         if (syncstates) {
 2247                 in6m_commit(inm);
 2248                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2249                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2250                     inm->in6m_ifp->if_xname);
 2251                 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
 2252                 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
 2253                     __func__, &inm->in6m_addr, inm->in6m_ifp->if_xname);
 2254         }
 2255 }
 2256 
 2257 /*
 2258  * Enqueue an MLDv2 group record to the given output queue.
 2259  *
 2260  * If is_state_change is zero, a current-state record is appended.
 2261  * If is_state_change is non-zero, a state-change report is appended.
 2262  *
 2263  * If is_group_query is non-zero, an mbuf packet chain is allocated.
 2264  * If is_group_query is zero, and there is a packet at the tail of
 2265  * the queue, the record will be appended to that packet provided
 2266  * there is enough free space.
 2267  * Otherwise a new mbuf packet chain is allocated.
 2268  *
 2269  * If is_source_query is non-zero, each source is checked to see if
 2270  * it was recorded for a Group-Source query, and will be omitted if
 2271  * it is not both in-mode and recorded.
 2272  *
 2273  * If use_block_allow is non-zero, state change reports for initial join
 2274  * and final leave, on an inclusive mode group with a source list, will be
 2275  * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 2276  *
 2277  * The function will attempt to allocate leading space in the packet
 2278  * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 2279  *
 2280  * If successful the size of all data appended to the queue is returned,
 2281  * otherwise an error code less than zero is returned, or zero if
 2282  * no record(s) were appended.
 2283  */
 2284 static int
 2285 mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
 2286     const int is_state_change, const int is_group_query,
 2287     const int is_source_query, const int use_block_allow)
 2288 {
 2289         struct mldv2_record      mr;
 2290         struct mldv2_record     *pmr;
 2291         struct ifnet            *ifp;
 2292         struct ip6_msource      *ims, *nims;
 2293         struct mbuf             *m0, *m, *md;
 2294         int                      error, is_filter_list_change;
 2295         int                      minrec0len, m0srcs, msrcs, nbytes, off;
 2296         int                      record_has_sources;
 2297         int                      now;
 2298         int                      type;
 2299         uint8_t                  mode;
 2300 #ifdef KTR
 2301         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2302 #endif
 2303 
 2304         IN6_MULTI_LOCK_ASSERT();
 2305 
 2306         error = 0;
 2307         ifp = inm->in6m_ifp;
 2308         is_filter_list_change = 0;
 2309         m = NULL;
 2310         m0 = NULL;
 2311         m0srcs = 0;
 2312         msrcs = 0;
 2313         nbytes = 0;
 2314         nims = NULL;
 2315         record_has_sources = 1;
 2316         pmr = NULL;
 2317         type = MLD_DO_NOTHING;
 2318         mode = inm->in6m_st[1].iss_fmode;
 2319 
 2320         /*
 2321          * If we did not transition out of ASM mode during t0->t1,
 2322          * and there are no source nodes to process, we can skip
 2323          * the generation of source records.
 2324          */
 2325         if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
 2326             inm->in6m_nsrc == 0)
 2327                 record_has_sources = 0;
 2328 
 2329         if (is_state_change) {
 2330                 /*
 2331                  * Queue a state change record.
 2332                  * If the mode did not change, and there are non-ASM
 2333                  * listeners or source filters present,
 2334                  * we potentially need to issue two records for the group.
 2335                  * If there are ASM listeners, and there was no filter
 2336                  * mode transition of any kind, do nothing.
 2337                  *
 2338                  * If we are transitioning to MCAST_UNDEFINED, we need
 2339                  * not send any sources. A transition to/from this state is
 2340                  * considered inclusive with some special treatment.
 2341                  *
 2342                  * If we are rewriting initial joins/leaves to use
 2343                  * ALLOW/BLOCK, and the group's membership is inclusive,
 2344                  * we need to send sources in all cases.
 2345                  */
 2346                 if (mode != inm->in6m_st[0].iss_fmode) {
 2347                         if (mode == MCAST_EXCLUDE) {
 2348                                 CTR1(KTR_MLD, "%s: change to EXCLUDE",
 2349                                     __func__);
 2350                                 type = MLD_CHANGE_TO_EXCLUDE_MODE;
 2351                         } else {
 2352                                 CTR1(KTR_MLD, "%s: change to INCLUDE",
 2353                                     __func__);
 2354                                 if (use_block_allow) {
 2355                                         /*
 2356                                          * XXX
 2357                                          * Here we're interested in state
 2358                                          * edges either direction between
 2359                                          * MCAST_UNDEFINED and MCAST_INCLUDE.
 2360                                          * Perhaps we should just check
 2361                                          * the group state, rather than
 2362                                          * the filter mode.
 2363                                          */
 2364                                         if (mode == MCAST_UNDEFINED) {
 2365                                                 type = MLD_BLOCK_OLD_SOURCES;
 2366                                         } else {
 2367                                                 type = MLD_ALLOW_NEW_SOURCES;
 2368                                         }
 2369                                 } else {
 2370                                         type = MLD_CHANGE_TO_INCLUDE_MODE;
 2371                                         if (mode == MCAST_UNDEFINED)
 2372                                                 record_has_sources = 0;
 2373                                 }
 2374                         }
 2375                 } else {
 2376                         if (record_has_sources) {
 2377                                 is_filter_list_change = 1;
 2378                         } else {
 2379                                 type = MLD_DO_NOTHING;
 2380                         }
 2381                 }
 2382         } else {
 2383                 /*
 2384                  * Queue a current state record.
 2385                  */
 2386                 if (mode == MCAST_EXCLUDE) {
 2387                         type = MLD_MODE_IS_EXCLUDE;
 2388                 } else if (mode == MCAST_INCLUDE) {
 2389                         type = MLD_MODE_IS_INCLUDE;
 2390                         KASSERT(inm->in6m_st[1].iss_asm == 0,
 2391                             ("%s: inm %p is INCLUDE but ASM count is %d",
 2392                              __func__, inm, inm->in6m_st[1].iss_asm));
 2393                 }
 2394         }
 2395 
 2396         /*
 2397          * Generate the filter list changes using a separate function.
 2398          */
 2399         if (is_filter_list_change)
 2400                 return (mld_v2_enqueue_filter_change(ifq, inm));
 2401 
 2402         if (type == MLD_DO_NOTHING) {
 2403                 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
 2404                     __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2405                     inm->in6m_ifp->if_xname);
 2406                 return (0);
 2407         }
 2408 
 2409         /*
 2410          * If any sources are present, we must be able to fit at least
 2411          * one in the trailing space of the tail packet's mbuf,
 2412          * ideally more.
 2413          */
 2414         minrec0len = sizeof(struct mldv2_record);
 2415         if (record_has_sources)
 2416                 minrec0len += sizeof(struct in6_addr);
 2417 
 2418         CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
 2419             mld_rec_type_to_str(type),
 2420             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2421             inm->in6m_ifp->if_xname);
 2422 
 2423         /*
 2424          * Check if we have a packet in the tail of the queue for this
 2425          * group into which the first group record for this group will fit.
 2426          * Otherwise allocate a new packet.
 2427          * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
 2428          * Note: Group records for G/GSR query responses MUST be sent
 2429          * in their own packet.
 2430          */
 2431         m0 = ifq->ifq_tail;
 2432         if (!is_group_query &&
 2433             m0 != NULL &&
 2434             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
 2435             (m0->m_pkthdr.len + minrec0len) <
 2436              (ifp->if_mtu - MLD_MTUSPACE)) {
 2437                 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
 2438                             sizeof(struct mldv2_record)) /
 2439                             sizeof(struct in6_addr);
 2440                 m = m0;
 2441                 CTR1(KTR_MLD, "%s: use existing packet", __func__);
 2442         } else {
 2443                 if (_IF_QFULL(ifq)) {
 2444                         CTR1(KTR_MLD, "%s: outbound queue full", __func__);
 2445                         return (-ENOMEM);
 2446                 }
 2447                 m = NULL;
 2448                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2449                     sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
 2450                 if (!is_state_change && !is_group_query)
 2451                         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 2452                 if (m == NULL)
 2453                         m = m_gethdr(M_DONTWAIT, MT_DATA);
 2454                 if (m == NULL)
 2455                         return (-ENOMEM);
 2456 
 2457                 mld_save_context(m, ifp);
 2458 
 2459                 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
 2460         }
 2461 
 2462         /*
 2463          * Append group record.
 2464          * If we have sources, we don't know how many yet.
 2465          */
 2466         mr.mr_type = type;
 2467         mr.mr_datalen = 0;
 2468         mr.mr_numsrc = 0;
 2469         mr.mr_addr = inm->in6m_addr;
 2470         in6_clearscope(&mr.mr_addr);
 2471         if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
 2472                 if (m != m0)
 2473                         m_freem(m);
 2474                 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
 2475                 return (-ENOMEM);
 2476         }
 2477         nbytes += sizeof(struct mldv2_record);
 2478 
 2479         /*
 2480          * Append as many sources as will fit in the first packet.
 2481          * If we are appending to a new packet, the chain allocation
 2482          * may potentially use clusters; use m_getptr() in this case.
 2483          * If we are appending to an existing packet, we need to obtain
 2484          * a pointer to the group record after m_append(), in case a new
 2485          * mbuf was allocated.
 2486          *
 2487          * Only append sources which are in-mode at t1. If we are
 2488          * transitioning to MCAST_UNDEFINED state on the group, and
 2489          * use_block_allow is zero, do not include source entries.
 2490          * Otherwise, we need to include this source in the report.
 2491          *
 2492          * Only report recorded sources in our filter set when responding
 2493          * to a group-source query.
 2494          */
 2495         if (record_has_sources) {
 2496                 if (m == m0) {
 2497                         md = m_last(m);
 2498                         pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
 2499                             md->m_len - nbytes);
 2500                 } else {
 2501                         md = m_getptr(m, 0, &off);
 2502                         pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
 2503                             off);
 2504                 }
 2505                 msrcs = 0;
 2506                 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
 2507                     nims) {
 2508                         CTR2(KTR_MLD, "%s: visit node %s", __func__,
 2509                             ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2510                         now = im6s_get_mode(inm, ims, 1);
 2511                         CTR2(KTR_MLD, "%s: node is %d", __func__, now);
 2512                         if ((now != mode) ||
 2513                             (now == mode &&
 2514                              (!use_block_allow && mode == MCAST_UNDEFINED))) {
 2515                                 CTR1(KTR_MLD, "%s: skip node", __func__);
 2516                                 continue;
 2517                         }
 2518                         if (is_source_query && ims->im6s_stp == 0) {
 2519                                 CTR1(KTR_MLD, "%s: skip unrecorded node",
 2520                                     __func__);
 2521                                 continue;
 2522                         }
 2523                         CTR1(KTR_MLD, "%s: append node", __func__);
 2524                         if (!m_append(m, sizeof(struct in6_addr),
 2525                             (void *)&ims->im6s_addr)) {
 2526                                 if (m != m0)
 2527                                         m_freem(m);
 2528                                 CTR1(KTR_MLD, "%s: m_append() failed.",
 2529                                     __func__);
 2530                                 return (-ENOMEM);
 2531                         }
 2532                         nbytes += sizeof(struct in6_addr);
 2533                         ++msrcs;
 2534                         if (msrcs == m0srcs)
 2535                                 break;
 2536                 }
 2537                 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
 2538                     msrcs);
 2539                 pmr->mr_numsrc = htons(msrcs);
 2540                 nbytes += (msrcs * sizeof(struct in6_addr));
 2541         }
 2542 
 2543         if (is_source_query && msrcs == 0) {
 2544                 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
 2545                 if (m != m0)
 2546                         m_freem(m);
 2547                 return (0);
 2548         }
 2549 
 2550         /*
 2551          * We are good to go with first packet.
 2552          */
 2553         if (m != m0) {
 2554                 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
 2555                 m->m_pkthdr.PH_vt.vt_nrecs = 1;
 2556                 _IF_ENQUEUE(ifq, m);
 2557         } else
 2558                 m->m_pkthdr.PH_vt.vt_nrecs++;
 2559 
 2560         /*
 2561          * No further work needed if no source list in packet(s).
 2562          */
 2563         if (!record_has_sources)
 2564                 return (nbytes);
 2565 
 2566         /*
 2567          * Whilst sources remain to be announced, we need to allocate
 2568          * a new packet and fill out as many sources as will fit.
 2569          * Always try for a cluster first.
 2570          */
 2571         while (nims != NULL) {
 2572                 if (_IF_QFULL(ifq)) {
 2573                         CTR1(KTR_MLD, "%s: outbound queue full", __func__);
 2574                         return (-ENOMEM);
 2575                 }
 2576                 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 2577                 if (m == NULL)
 2578                         m = m_gethdr(M_DONTWAIT, MT_DATA);
 2579                 if (m == NULL)
 2580                         return (-ENOMEM);
 2581                 mld_save_context(m, ifp);
 2582                 md = m_getptr(m, 0, &off);
 2583                 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
 2584                 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
 2585 
 2586                 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
 2587                         if (m != m0)
 2588                                 m_freem(m);
 2589                         CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
 2590                         return (-ENOMEM);
 2591                 }
 2592                 m->m_pkthdr.PH_vt.vt_nrecs = 1;
 2593                 nbytes += sizeof(struct mldv2_record);
 2594 
 2595                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2596                     sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
 2597 
 2598                 msrcs = 0;
 2599                 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
 2600                         CTR2(KTR_MLD, "%s: visit node %s",
 2601                             __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2602                         now = im6s_get_mode(inm, ims, 1);
 2603                         if ((now != mode) ||
 2604                             (now == mode &&
 2605                              (!use_block_allow && mode == MCAST_UNDEFINED))) {
 2606                                 CTR1(KTR_MLD, "%s: skip node", __func__);
 2607                                 continue;
 2608                         }
 2609                         if (is_source_query && ims->im6s_stp == 0) {
 2610                                 CTR1(KTR_MLD, "%s: skip unrecorded node",
 2611                                     __func__);
 2612                                 continue;
 2613                         }
 2614                         CTR1(KTR_MLD, "%s: append node", __func__);
 2615                         if (!m_append(m, sizeof(struct in6_addr),
 2616                             (void *)&ims->im6s_addr)) {
 2617                                 if (m != m0)
 2618                                         m_freem(m);
 2619                                 CTR1(KTR_MLD, "%s: m_append() failed.",
 2620                                     __func__);
 2621                                 return (-ENOMEM);
 2622                         }
 2623                         ++msrcs;
 2624                         if (msrcs == m0srcs)
 2625                                 break;
 2626                 }
 2627                 pmr->mr_numsrc = htons(msrcs);
 2628                 nbytes += (msrcs * sizeof(struct in6_addr));
 2629 
 2630                 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
 2631                 _IF_ENQUEUE(ifq, m);
 2632         }
 2633 
 2634         return (nbytes);
 2635 }
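
/*
 * Editorial example of the m0srcs arithmetic above, assuming a
 * 1500-byte Ethernet MTU and MLD_MTUSPACE covering the IPv6 header
 * (40), the hop-by-hop Router Alert option (8) and the ICMPv6 report
 * header (8), i.e. 56 bytes, with a 20-byte group record header and
 * 16-byte source addresses:
 *
 *      m0srcs = (1500 - 56 - 20) / 16;         (89 sources)
 *
 * so a single group record on such a link holds at most 89 sources
 * before the loop above spills into a new packet.
 */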
 2636 
 2637 /*
 2638  * Type used to mark record pass completion.
 2639  * We exploit the fact we can cast to this easily from the
 2640  * current filter modes on each ip_msource node.
 2641  */
 2642 typedef enum {
 2643         REC_NONE = 0x00,        /* MCAST_UNDEFINED */
 2644         REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
 2645         REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
 2646         REC_FULL = REC_ALLOW | REC_BLOCK
 2647 } rectype_t;
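
      /*
       * Illustrative note (not in the original source): these values line
       * up with the MCAST_* filter modes (MCAST_UNDEFINED == 0,
       * MCAST_INCLUDE == 1, MCAST_EXCLUDE == 2), which is what makes the
       * direct cast in mld_v2_enqueue_filter_change() below safe.  A
       * compile-time check could make that dependency explicit:
       */
      #if 0
      CTASSERT(REC_NONE == MCAST_UNDEFINED);
      CTASSERT(REC_ALLOW == MCAST_INCLUDE);
      CTASSERT(REC_BLOCK == MCAST_EXCLUDE);
      #endif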
 2648 
 2649 /*
 2650  * Enqueue an MLDv2 filter list change to the given output queue.
 2651  *
 2652  * Source list filter state is held in an RB-tree. When the filter list
 2653  * for a group is changed without changing its mode, we need to compute
 2654  * the deltas between T0 and T1 for each source in the filter set,
 2655  * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 2656  *
 2657  * As we may queue two record types, and the entire RB-tree needs
 2658  * to be walked at once, we break this out into its own function
 2659  * so we can generate a tightly packed queue of packets.
 2660  *
 2661  * XXX This could be written to only use one tree walk, although that makes
 2662  * serializing into the mbuf chains a bit harder. For now we do two walks
 2663  * which makes things easier on us, and it may or may not be harder on
 2664  * the L2 cache.
 2665  *
 2666  * On success, the size of all data appended to the queue is returned;
 2667  * on failure, an error code less than zero is returned.  Zero is
 2668  * returned if no records were appended.
 2669  */
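      /*
       * Worked example (illustrative; not in the original source): for a
       * group in INCLUDE mode whose source set changes from {A, B} at t0
       * to {B, C} at t1, the walk below emits an ALLOW_NEW_SOURCES record
       * for C (in-mode at t1 only) and a BLOCK_OLD_SOURCES record for A
       * (in-mode at t0 only); B is unchanged and is skipped.
       */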
 2670 static int
 2671 mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 2672 {
 2673         static const int MINRECLEN =
 2674             sizeof(struct mldv2_record) + sizeof(struct in6_addr);
 2675         struct ifnet            *ifp;
 2676         struct mldv2_record      mr;
 2677         struct mldv2_record     *pmr;
 2678         struct ip6_msource      *ims, *nims;
 2679         struct mbuf             *m, *m0, *md;
 2680         int                      m0srcs, nbytes, npbytes, off, rsrcs, schanged;
 2681         int                      nallow, nblock;
 2682         uint8_t                  mode, now, then;
 2683         rectype_t                crt, drt, nrt;
 2684 #ifdef KTR
 2685         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2686 #endif
 2687 
 2688         IN6_MULTI_LOCK_ASSERT();
 2689 
 2690         if (inm->in6m_nsrc == 0 ||
 2691             (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
 2692                 return (0);
 2693 
 2694         ifp = inm->in6m_ifp;                    /* interface */
 2695         mode = inm->in6m_st[1].iss_fmode;       /* filter mode at t1 */
 2696         crt = REC_NONE; /* current group record type */
 2697         drt = REC_NONE; /* mask of completed group record types */
 2698         nrt = REC_NONE; /* record type for current node */
 2699         m0srcs = 0;     /* # sources which will fit in current mbuf chain */
 2700         npbytes = 0;    /* # of bytes appended this packet */
 2701         nbytes = 0;     /* # of bytes appended to group's state-change queue */
 2702         rsrcs = 0;      /* # sources encoded in current record */
 2703         schanged = 0;   /* # nodes encoded in overall filter change */
 2704         nallow = 0;     /* # of source entries in ALLOW_NEW */
 2705         nblock = 0;     /* # of source entries in BLOCK_OLD */
 2706         nims = NULL;    /* next tree node pointer */
 2707 
 2708         /*
 2709          * Loop over each possible filter record mode.
 2710          * The first kind of source we encounter tells us which
 2711          * kind of record we start appending first.
 2712          * If a node transitioned to UNDEFINED at t1, its mode is treated
 2713          * as the inverse of the group's filter mode.
 2714          */
 2715         while (drt != REC_FULL) {
 2716                 do {
 2717                         m0 = ifq->ifq_tail;
 2718                         if (m0 != NULL &&
 2719                             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
 2720                              MLD_V2_REPORT_MAXRECS) &&
 2721                             (m0->m_pkthdr.len + MINRECLEN) <
 2722                              (ifp->if_mtu - MLD_MTUSPACE)) {
 2723                                 m = m0;
 2724                                 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
 2725                                             sizeof(struct mldv2_record)) /
 2726                                             sizeof(struct in6_addr);
 2727                                 CTR1(KTR_MLD,
 2728                                     "%s: use previous packet", __func__);
 2729                         } else {
 2730                                 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 2731                                 if (m == NULL)
 2732                                         m = m_gethdr(M_DONTWAIT, MT_DATA);
 2733                                 if (m == NULL) {
 2734                                         CTR1(KTR_MLD,
 2735                                             "%s: m_get*() failed", __func__);
 2736                                         return (-ENOMEM);
 2737                                 }
 2738                                 m->m_pkthdr.PH_vt.vt_nrecs = 0;
 2739                                 mld_save_context(m, ifp);
 2740                                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2741                                     sizeof(struct mldv2_record)) /
 2742                                     sizeof(struct in6_addr);
 2743                                 npbytes = 0;
 2744                                 CTR1(KTR_MLD,
 2745                                     "%s: allocated new packet", __func__);
 2746                         }
 2747                         /*
 2748                          * Append the MLD group record header to the
 2749                          * current packet's data area.
 2750                          * Recalculate pointer to free space for next
 2751                          * group record, in case m_append() allocated
 2752                          * a new mbuf or cluster.
 2753                          */
 2754                         memset(&mr, 0, sizeof(mr));
 2755                         mr.mr_addr = inm->in6m_addr;
 2756                         in6_clearscope(&mr.mr_addr);
 2757                         if (!m_append(m, sizeof(mr), (void *)&mr)) {
 2758                                 if (m != m0)
 2759                                         m_freem(m);
 2760                                 CTR1(KTR_MLD,
 2761                                     "%s: m_append() failed", __func__);
 2762                                 return (-ENOMEM);
 2763                         }
 2764                         npbytes += sizeof(struct mldv2_record);
 2765                         if (m != m0) {
 2766                                 /* new packet; offset in chain */
 2767                                 md = m_getptr(m, npbytes -
 2768                                     sizeof(struct mldv2_record), &off);
 2769                                 pmr = (struct mldv2_record *)(mtod(md,
 2770                                     uint8_t *) + off);
 2771                         } else {
 2772                                 /* current packet; offset from last append */
 2773                                 md = m_last(m);
 2774                                 pmr = (struct mldv2_record *)(mtod(md,
 2775                                     uint8_t *) + md->m_len -
 2776                                     sizeof(struct mldv2_record));
 2777                         }
 2778                         /*
 2779                          * Begin walking the tree for this record type
 2780                          * pass, or continue from where we left off
 2781                          * previously if we had to allocate a new packet.
 2782                          * Only report deltas in-mode at t1.
 2783                          * We need not report included sources as allowed
 2784          * if we are in inclusive mode on the group;
 2785          * however, the converse is not true.
 2786                          */
 2787                         rsrcs = 0;
 2788                         if (nims == NULL) {
 2789                                 nims = RB_MIN(ip6_msource_tree,
 2790                                     &inm->in6m_srcs);
 2791                         }
 2792                         RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
 2793                                 CTR2(KTR_MLD, "%s: visit node %s", __func__,
 2794                                     ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2795                                 now = im6s_get_mode(inm, ims, 1);
 2796                                 then = im6s_get_mode(inm, ims, 0);
 2797                                 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
 2798                                     __func__, then, now);
 2799                                 if (now == then) {
 2800                                         CTR1(KTR_MLD,
 2801                                             "%s: skip unchanged", __func__);
 2802                                         continue;
 2803                                 }
 2804                                 if (mode == MCAST_EXCLUDE &&
 2805                                     now == MCAST_INCLUDE) {
 2806                                         CTR1(KTR_MLD,
 2807                                             "%s: skip IN src on EX group",
 2808                                             __func__);
 2809                                         continue;
 2810                                 }
 2811                                 nrt = (rectype_t)now;
 2812                                 if (nrt == REC_NONE)
 2813                                         nrt = (rectype_t)(~mode & REC_FULL);
 2814                                 if (schanged++ == 0) {
 2815                                         crt = nrt;
 2816                                 } else if (crt != nrt)
 2817                                         continue;
 2818                                 if (!m_append(m, sizeof(struct in6_addr),
 2819                                     (void *)&ims->im6s_addr)) {
 2820                                         if (m != m0)
 2821                                                 m_freem(m);
 2822                                         CTR1(KTR_MLD,
 2823                                             "%s: m_append() failed", __func__);
 2824                                         return (-ENOMEM);
 2825                                 }
 2826                                 nallow += !!(crt == REC_ALLOW);
 2827                                 nblock += !!(crt == REC_BLOCK);
 2828                                 if (++rsrcs == m0srcs)
 2829                                         break;
 2830                         }
 2831                         /*
 2832                          * If we did not append any tree nodes on this
 2833                          * pass, back out of allocations.
 2834                          */
 2835                         if (rsrcs == 0) {
 2836                                 npbytes -= sizeof(struct mldv2_record);
 2837                                 if (m != m0) {
 2838                                         CTR1(KTR_MLD,
 2839                                             "%s: m_free(m)", __func__);
 2840                                         m_freem(m);
 2841                                 } else {
 2842                                         CTR1(KTR_MLD,
 2843                                             "%s: m_adj(m, -mr)", __func__);
 2844                                         m_adj(m, -((int)sizeof(
 2845                                             struct mldv2_record)));
 2846                                 }
 2847                                 continue;
 2848                         }
 2849                         npbytes += (rsrcs * sizeof(struct in6_addr));
 2850                         if (crt == REC_ALLOW)
 2851                                 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
 2852                         else if (crt == REC_BLOCK)
 2853                                 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
 2854                         pmr->mr_numsrc = htons(rsrcs);
 2855                         /*
 2856                          * Count the new group record, and enqueue this
 2857                          * packet if it wasn't already queued.
 2858                          */
 2859                         m->m_pkthdr.PH_vt.vt_nrecs++;
 2860                         if (m != m0)
 2861                                 _IF_ENQUEUE(ifq, m);
 2862                         nbytes += npbytes;
 2863                 } while (nims != NULL);
 2864                 drt |= crt;
 2865                 crt = (~crt & REC_FULL);
 2866         }
 2867 
 2868         CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
 2869             nallow, nblock);
 2870 
 2871         return (nbytes);
 2872 }
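
      /*
       * Illustrative trace (not in the original source) of the drt/crt
       * bookkeeping that ends the outer loop above, assuming the first
       * node visited selects the ALLOW pass:
       *
       *   pass 1: crt = REC_ALLOW; then drt |= crt -> REC_ALLOW and
       *           crt = (~crt & REC_FULL) -> REC_BLOCK
       *   pass 2: BLOCK sources are appended; drt |= crt -> REC_FULL,
       *           and while (drt != REC_FULL) terminates.
       */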
 2873 
 2874 static int
 2875 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 2876 {
 2877         struct ifqueue  *gq;
 2878         struct mbuf     *m;             /* pending state-change */
 2879         struct mbuf     *m0;            /* copy of pending state-change */
 2880         struct mbuf     *mt;            /* last state-change in packet */
 2881         int              docopy, domerge;
 2882         u_int            recslen;
 2883 
 2884         docopy = 0;
 2885         domerge = 0;
 2886         recslen = 0;
 2887 
 2888         IN6_MULTI_LOCK_ASSERT();
 2889         MLD_LOCK_ASSERT();
 2890 
 2891         /*
 2892          * If there are further pending retransmissions, make a writable
 2893          * copy of each queued state-change message before merging.
 2894          */
 2895         if (inm->in6m_scrv > 0)
 2896                 docopy = 1;
 2897 
 2898         gq = &inm->in6m_scq;
 2899 #ifdef KTR
 2900         if (gq->ifq_head == NULL) {
 2901                 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
 2902                     __func__, inm);
 2903         }
 2904 #endif
 2905 
 2906         m = gq->ifq_head;
 2907         while (m != NULL) {
 2908                 /*
 2909                  * Only merge the report into the current packet if
 2910                  * there is sufficient space to do so; an MLDv2 report
 2911                  * packet may only contain 65,535 group records.
 2912          * Always use a simple mbuf chain concatenation to do this,
 2913                  * as large state changes for single groups may have
 2914                  * allocated clusters.
 2915                  */
 2916                 domerge = 0;
 2917                 mt = ifscq->ifq_tail;
 2918                 if (mt != NULL) {
 2919                         recslen = m_length(m, NULL);
 2920 
 2921                         if ((mt->m_pkthdr.PH_vt.vt_nrecs +
 2922                             m->m_pkthdr.PH_vt.vt_nrecs <=
 2923                             MLD_V2_REPORT_MAXRECS) &&
 2924                             (mt->m_pkthdr.len + recslen <=
 2925                             (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
 2926                                 domerge = 1;
 2927                 }
 2928 
 2929                 if (!domerge && _IF_QFULL(gq)) {
 2930                         CTR2(KTR_MLD,
 2931                             "%s: outbound queue full, skipping whole packet %p",
 2932                             __func__, m);
 2933                         mt = m->m_nextpkt;
 2934                         if (!docopy)
 2935                                 m_freem(m);
 2936                         m = mt;
 2937                         continue;
 2938                 }
 2939 
 2940                 if (!docopy) {
 2941                         CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
 2942                         _IF_DEQUEUE(gq, m0);
 2943                         m = m0->m_nextpkt;
 2944                 } else {
 2945                         CTR2(KTR_MLD, "%s: copying %p", __func__, m);
 2946                         m0 = m_dup(m, M_NOWAIT);
 2947                         if (m0 == NULL)
 2948                                 return (ENOMEM);
 2949                         m0->m_nextpkt = NULL;
 2950                         m = m->m_nextpkt;
 2951                 }
 2952 
 2953                 if (!domerge) {
 2954                         CTR3(KTR_MLD, "%s: queueing %p to ifscq %p",
 2955                             __func__, m0, ifscq);
 2956                         _IF_ENQUEUE(ifscq, m0);
 2957                 } else {
 2958                         struct mbuf *mtl;       /* last mbuf of packet mt */
 2959 
 2960                         CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p",
 2961                             __func__, m0, mt);
 2962 
 2963                         mtl = m_last(mt);
 2964                         m0->m_flags &= ~M_PKTHDR;
 2965                         mt->m_pkthdr.len += recslen;
 2966                         mt->m_pkthdr.PH_vt.vt_nrecs +=
 2967                             m0->m_pkthdr.PH_vt.vt_nrecs;
 2968 
 2969                         mtl->m_next = m0;
 2970                 }
 2971         }
 2972 
 2973         return (0);
 2974 }
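
      /*
       * Worked fit check (illustrative; sizes assume a typical build):
       * with a 1500-byte MTU and MLD_MTUSPACE of 56 bytes, a pending
       * state-change packet of recslen bytes is merged into the tail
       * packet mt above only if the combined record count stays at or
       * below MLD_V2_REPORT_MAXRECS (65,535) and
       * mt->m_pkthdr.len + recslen <= 1500 - 56 = 1444 bytes.
       */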
 2975 
 2976 /*
 2977  * Respond to a pending MLDv2 General Query.
 2978  */
 2979 static void
 2980 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
 2981 {
 2982         struct ifmultiaddr      *ifma;
 2983         struct ifnet            *ifp;
 2984         struct in6_multi        *inm;
 2985         int                      retval;
 2986 
 2987         IN6_MULTI_LOCK_ASSERT();
 2988         MLD_LOCK_ASSERT();
 2989 
 2990         KASSERT(mli->mli_version == MLD_VERSION_2,
 2991             ("%s: called when version %d", __func__, mli->mli_version));
 2992 
 2993         ifp = mli->mli_ifp;
 2994 
 2995         IF_ADDR_LOCK(ifp);
 2996         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 2997                 if (ifma->ifma_addr->sa_family != AF_INET6 ||
 2998                     ifma->ifma_protospec == NULL)
 2999                         continue;
 3000 
 3001                 inm = (struct in6_multi *)ifma->ifma_protospec;
 3002                 KASSERT(ifp == inm->in6m_ifp,
 3003                     ("%s: inconsistent ifp", __func__));
 3004 
 3005                 switch (inm->in6m_state) {
 3006                 case MLD_NOT_MEMBER:
 3007                 case MLD_SILENT_MEMBER:
 3008                         break;
 3009                 case MLD_REPORTING_MEMBER:
 3010                 case MLD_IDLE_MEMBER:
 3011                 case MLD_LAZY_MEMBER:
 3012                 case MLD_SLEEPING_MEMBER:
 3013                 case MLD_AWAKENING_MEMBER:
 3014                         inm->in6m_state = MLD_REPORTING_MEMBER;
 3015                         retval = mld_v2_enqueue_group_record(&mli->mli_gq,
 3016                             inm, 0, 0, 0, 0);
 3017                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 3018                             __func__, retval);
 3019                         break;
 3020                 case MLD_G_QUERY_PENDING_MEMBER:
 3021                 case MLD_SG_QUERY_PENDING_MEMBER:
 3022                 case MLD_LEAVING_MEMBER:
 3023                         break;
 3024                 }
 3025         }
 3026         IF_ADDR_UNLOCK(ifp);
 3027 
 3028         mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
 3029 
 3030         /*
 3031          * Slew transmission of bursts over 500ms intervals.
 3032          */
 3033         if (mli->mli_gq.ifq_head != NULL) {
 3034                 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
 3035                     MLD_RESPONSE_BURST_INTERVAL);
 3036                 V_interface_timers_running6 = 1;
 3037         }
 3038 }
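
      /*
       * Illustrative note (an assumption about a macro defined earlier in
       * this file): MLD_RANDOM_DELAY(X) is taken to return a uniform value
       * in [1, X], so re-arming mli_v2_timer above spreads any remaining
       * queued reports across later timer ticks instead of bursting them
       * onto the link at once.
       */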
 3039 
 3040 /*
 3041  * Transmit the next pending message in the output queue.
 3042  *
 3043  * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
 3044  * MRT: Nothing needs to be done, as MLD traffic is always local to
 3045  * a link and uses a link-scope multicast address.
 3046  */
 3047 static void
 3048 mld_dispatch_packet(struct mbuf *m)
 3049 {
 3050         struct ip6_moptions      im6o;
 3051         struct ifnet            *ifp;
 3052         struct ifnet            *oifp;
 3053         struct mbuf             *m0;
 3054         struct mbuf             *md;
 3055         struct ip6_hdr          *ip6;
 3056         struct mld_hdr          *mld;
 3057         int                      error;
 3058         int                      off;
 3059         int                      type;
 3060         uint32_t                 ifindex;
 3061 
 3062         CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
 3063 
 3064         /*
 3065          * Restore the VNET pointer from the enqueued mbuf chain
 3066          * before doing anything else. Whilst we use interface
 3067          * indexes to guard against interface detach, they are
 3068          * unique only within each VIMAGE, so the vnet must be set first.
 3069          */
 3070         ifindex = mld_restore_context(m);
 3071 
 3072         /*
 3073          * Check if the ifnet still exists. This limits the scope of
 3074          * any race in the absence of a global ifp lock at low cost
 3075          * (an array lookup).
 3076          */
 3077         ifp = ifnet_byindex(ifindex);
 3078         if (ifp == NULL) {
 3079                 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
 3080                     __func__, m, ifindex);
 3081                 m_freem(m);
 3082                 IP6STAT_INC(ip6s_noroute);
 3083                 goto out;
 3084         }
 3085 
 3086         im6o.im6o_multicast_hlim = 1;
 3087         im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
 3088         im6o.im6o_multicast_ifp = ifp;
 3089 
 3090         if (m->m_flags & M_MLDV1) {
 3091                 m0 = m;
 3092         } else {
 3093                 m0 = mld_v2_encap_report(ifp, m);
 3094                 if (m0 == NULL) {
 3095                         CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
 3096                         m_freem(m);
 3097                         IP6STAT_INC(ip6s_odropped);
 3098                         goto out;
 3099                 }
 3100         }
 3101 
 3102         mld_scrub_context(m0);
 3103         m->m_flags &= ~(M_PROTOFLAGS);
 3104         m0->m_pkthdr.rcvif = V_loif;
 3105 
 3106         ip6 = mtod(m0, struct ip6_hdr *);
 3107 #if 0
 3108         (void)in6_setscope(&ip6->ip6_dst, ifp, NULL);   /* XXX LOR */
 3109 #else
 3110         /*
 3111          * XXX XXX Break some KPI rules to prevent an LOR which would
 3112          * occur if we called in6_setscope() at transmission.
 3113          * See comments at top of file.
 3114          */
 3115         MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
 3116 #endif
 3117 
 3118         /*
 3119          * Retrieve the ICMPv6 type before handoff to ip6_output(),
 3120          * so we can bump the stats.
 3121          */
 3122         md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
 3123         mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
 3124         type = mld->mld_type;
 3125 
 3126         error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
 3127             &oifp, NULL);
 3128         if (error) {
 3129                 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
 3130                 goto out;
 3131         }
 3132         ICMP6STAT_INC(icp6s_outhist[type]);
 3133         if (oifp != NULL) {
 3134                 icmp6_ifstat_inc(oifp, ifs6_out_msg);
 3135                 switch (type) {
 3136                 case MLD_LISTENER_REPORT:
 3137                 case MLDV2_LISTENER_REPORT:
 3138                         icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
 3139                         break;
 3140                 case MLD_LISTENER_DONE:
 3141                         icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
 3142                         break;
 3143                 }
 3144         }
 3145 out:
 3146         return;
 3147 }
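
      /*
       * Illustrative note (an assumption; the real helpers are defined
       * earlier in this file): mld_save_context() is taken to stash the
       * transmit interface's if_index (and, under VIMAGE, its vnet) in the
       * mbuf packet header at enqueue time, which is what allows
       * mld_restore_context() above to revalidate the interface with a
       * cheap ifnet_byindex() array lookup at dispatch time.
       */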
 3148 
 3149 /*
 3150  * Encapsulate an MLDv2 report.
 3151  *
 3152  * KAME IPv6 requires that hop-by-hop options be passed separately,
 3153  * and that the IPv6 header be prepended in a separate mbuf.
 3154  *
 3155  * Returns a pointer to the new mbuf chain head, or NULL if the
 3156  * allocation failed.
 3157  */
 3158 static struct mbuf *
 3159 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
 3160 {
 3161         struct mbuf             *mh;
 3162         struct mldv2_report     *mld;
 3163         struct ip6_hdr          *ip6;
 3164         struct in6_ifaddr       *ia;
 3165         int                      mldreclen;
 3166 
 3167         KASSERT(ifp != NULL, ("%s: null ifp", __func__));
 3168         KASSERT((m->m_flags & M_PKTHDR),
 3169             ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
 3170 
 3171         /*
 3172          * RFC3590: OK to send as :: or tentative during DAD.
 3173          */
 3174         ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
 3175         if (ia == NULL)
 3176                 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
 3177 
 3178         MGETHDR(mh, M_DONTWAIT, MT_HEADER);
 3179         if (mh == NULL) {
 3180                 if (ia != NULL)
 3181                         ifa_free(&ia->ia_ifa);
 3182                 m_freem(m);
 3183                 return (NULL);
 3184         }
 3185         MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
 3186 
 3187         mldreclen = m_length(m, NULL);
 3188         CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
 3189 
 3190         mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
 3191         mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
 3192             sizeof(struct mldv2_report) + mldreclen;
 3193 
 3194         ip6 = mtod(mh, struct ip6_hdr *);
 3195         ip6->ip6_flow = 0;
 3196         ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
 3197         ip6->ip6_vfc |= IPV6_VERSION;
 3198         ip6->ip6_nxt = IPPROTO_ICMPV6;
 3199         ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
 3200         if (ia != NULL)
 3201                 ifa_free(&ia->ia_ifa);
 3202         ip6->ip6_dst = in6addr_linklocal_allv2routers;
 3203         /* scope ID will be set in netisr */
 3204 
 3205         mld = (struct mldv2_report *)(ip6 + 1);
 3206         mld->mld_type = MLDV2_LISTENER_REPORT;
 3207         mld->mld_code = 0;
 3208         mld->mld_cksum = 0;
 3209         mld->mld_v2_reserved = 0;
 3210         mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
 3211         m->m_pkthdr.PH_vt.vt_nrecs = 0;
 3212 
 3213         mh->m_next = m;
 3214         mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
 3215             sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
 3216         return (mh);
 3217 }
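
      /*
       * Resulting chain layout (illustrative; sizes assume a 40-byte IPv6
       * header and an 8-byte MLDv2 report header):
       *
       *   mh: [ ip6_hdr | mldv2_report ] -> m: [ group records ... ]
       *
       * in6_cksum() above checksums the ICMPv6 payload starting at offset
       * sizeof(struct ip6_hdr), covering the report header plus all
       * mldreclen bytes of group records.
       */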
 3218 
 3219 #ifdef KTR
 3220 static char *
 3221 mld_rec_type_to_str(const int type)
 3222 {
 3223 
 3224         switch (type) {
 3225         case MLD_CHANGE_TO_EXCLUDE_MODE:
 3226                 return "TO_EX";
 3227         case MLD_CHANGE_TO_INCLUDE_MODE:
 3228                 return "TO_IN";
 3229         case MLD_MODE_IS_EXCLUDE:
 3230                 return "MODE_EX";
 3231         case MLD_MODE_IS_INCLUDE:
 3232                 return "MODE_IN";
 3233         case MLD_ALLOW_NEW_SOURCES:
 3234                 return "ALLOW_NEW";
 3235         case MLD_BLOCK_OLD_SOURCES:
 3236                 return "BLOCK_OLD";
 3237         default:
 3238                 break;
 3239         }
 3246         return "unknown";
 3247 }
 3248 #endif
 3249 
 3250 static void
 3251 mld_init(void *unused __unused)
 3252 {
 3253 
 3254         CTR1(KTR_MLD, "%s: initializing", __func__);
 3255         MLD_LOCK_INIT();
 3256 
 3257         ip6_initpktopts(&mld_po);
 3258         mld_po.ip6po_hlim = 1;
 3259         mld_po.ip6po_hbh = &mld_ra.hbh;
 3260         mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
 3261         mld_po.ip6po_flags = IP6PO_DONTFRAG;
 3262 }
 3263 SYSINIT(mld_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_init, NULL);
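
      /*
       * Illustrative note (not in the original source): the packet options
       * initialized above are consumed by ip6_output() in
       * mld_dispatch_packet(): hop limit 1, the hop-by-hop Router Alert
       * option carried in mld_ra.hbh, a preference for non-temporary
       * source addresses, and IP6PO_DONTFRAG so reports are never
       * fragmented.
       */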
 3264 
 3265 static void
 3266 mld_uninit(void *unused __unused)
 3267 {
 3268 
 3269         CTR1(KTR_MLD, "%s: tearing down", __func__);
 3270         MLD_LOCK_DESTROY();
 3271 }
 3272 SYSUNINIT(mld_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_uninit, NULL);
 3273 
 3274 static void
 3275 vnet_mld_init(const void *unused __unused)
 3276 {
 3277 
 3278         CTR1(KTR_MLD, "%s: initializing", __func__);
 3279 
 3280         LIST_INIT(&V_mli_head);
 3281 }
 3282 VNET_SYSINIT(vnet_mld_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_init,
 3283     NULL);
 3284 
 3285 static void
 3286 vnet_mld_uninit(const void *unused __unused)
 3287 {
 3288 
 3289         CTR1(KTR_MLD, "%s: tearing down", __func__);
 3290 
 3291         KASSERT(LIST_EMPTY(&V_mli_head),
 3292             ("%s: mli list not empty; ifnets not detached?", __func__));
 3293 }
 3294 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_uninit,
 3295     NULL);
 3296 
 3297 static int
 3298 mld_modevent(module_t mod, int type, void *unused __unused)
 3299 {
 3300 
 3301         switch (type) {
 3302         case MOD_LOAD:
 3303         case MOD_UNLOAD:
 3304                 break;
 3305         default:
 3306                 return (EOPNOTSUPP);
 3307         }
 3308         return (0);
 3309 }
 3310 
 3311 static moduledata_t mld_mod = {
 3312     "mld",
 3313     mld_modevent,
 3314     0
 3315 };
 3316 DECLARE_MODULE(mld, mld_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
