The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/netinet6/mld6.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 2009 Bruce Simpson.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. The name of the author may not be used to endorse or promote
   15  *    products derived from this software without specific prior written
   16  *    permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  *      $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
   31  */
   32 
   33 /*-
   34  * Copyright (c) 1988 Stephen Deering.
   35  * Copyright (c) 1992, 1993
   36  *      The Regents of the University of California.  All rights reserved.
   37  *
   38  * This code is derived from software contributed to Berkeley by
   39  * Stephen Deering of Stanford University.
   40  *
   41  * Redistribution and use in source and binary forms, with or without
   42  * modification, are permitted provided that the following conditions
   43  * are met:
   44  * 1. Redistributions of source code must retain the above copyright
   45  *    notice, this list of conditions and the following disclaimer.
   46  * 2. Redistributions in binary form must reproduce the above copyright
   47  *    notice, this list of conditions and the following disclaimer in the
   48  *    documentation and/or other materials provided with the distribution.
   49  * 3. Neither the name of the University nor the names of its contributors
   50  *    may be used to endorse or promote products derived from this software
   51  *    without specific prior written permission.
   52  *
   53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   63  * SUCH DAMAGE.
   64  *
   65  *      @(#)igmp.c      8.1 (Berkeley) 7/19/93
   66  */
   67 
   68 #include <sys/cdefs.h>
   69 __FBSDID("$FreeBSD: releng/12.0/sys/netinet6/mld6.c 338163 2018-08-21 23:03:02Z mmacy $");
   70 
   71 #include "opt_inet.h"
   72 #include "opt_inet6.h"
   73 
   74 #include <sys/param.h>
   75 #include <sys/systm.h>
   76 #include <sys/mbuf.h>
   77 #include <sys/socket.h>
   78 #include <sys/protosw.h>
   79 #include <sys/sysctl.h>
   80 #include <sys/kernel.h>
   81 #include <sys/callout.h>
   82 #include <sys/malloc.h>
   83 #include <sys/module.h>
   84 #include <sys/ktr.h>
   85 
   86 #include <net/if.h>
   87 #include <net/if_var.h>
   88 #include <net/route.h>
   89 #include <net/vnet.h>
   90 
   91 #include <netinet/in.h>
   92 #include <netinet/in_var.h>
   93 #include <netinet6/in6_var.h>
   94 #include <netinet/ip6.h>
   95 #include <netinet6/ip6_var.h>
   96 #include <netinet6/scope6_var.h>
   97 #include <netinet/icmp6.h>
   98 #include <netinet6/mld6.h>
   99 #include <netinet6/mld6_var.h>
  100 
  101 #include <security/mac/mac_framework.h>
  102 
  103 #ifndef KTR_MLD
  104 #define KTR_MLD KTR_INET6
  105 #endif
  106 
  107 static struct mld_ifsoftc *
  108                 mli_alloc_locked(struct ifnet *);
  109 static void     mli_delete_locked(const struct ifnet *);
  110 static void     mld_dispatch_packet(struct mbuf *);
  111 static void     mld_dispatch_queue(struct mbufq *, int);
  112 static void     mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
  113 static void     mld_fasttimo_vnet(void);
  114 static int      mld_handle_state_change(struct in6_multi *,
  115                     struct mld_ifsoftc *);
  116 static int      mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
  117                     const int);
  118 #ifdef KTR
  119 static char *   mld_rec_type_to_str(const int);
  120 #endif
  121 static void     mld_set_version(struct mld_ifsoftc *, const int);
  122 static void     mld_slowtimo_vnet(void);
  123 static int      mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
  124                     /*const*/ struct mld_hdr *);
  125 static int      mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
  126                     /*const*/ struct mld_hdr *);
  127 static void     mld_v1_process_group_timer(struct in6_multi_head *,
  128                     struct in6_multi *);
  129 static void     mld_v1_process_querier_timers(struct mld_ifsoftc *);
  130 static int      mld_v1_transmit_report(struct in6_multi *, const int);
  131 static void     mld_v1_update_group(struct in6_multi *, const int);
  132 static void     mld_v2_cancel_link_timers(struct mld_ifsoftc *);
  133 static void     mld_v2_dispatch_general_query(struct mld_ifsoftc *);
  134 static struct mbuf *
  135                 mld_v2_encap_report(struct ifnet *, struct mbuf *);
  136 static int      mld_v2_enqueue_filter_change(struct mbufq *,
  137                     struct in6_multi *);
  138 static int      mld_v2_enqueue_group_record(struct mbufq *,
  139                     struct in6_multi *, const int, const int, const int,
  140                     const int);
  141 static int      mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
  142                     struct mbuf *, const int, const int);
  143 static int      mld_v2_merge_state_changes(struct in6_multi *,
  144                     struct mbufq *);
  145 static void     mld_v2_process_group_timers(struct in6_multi_head *,
  146                     struct mbufq *, struct mbufq *,
  147                     struct in6_multi *, const int);
  148 static int      mld_v2_process_group_query(struct in6_multi *,
  149                     struct mld_ifsoftc *mli, int, struct mbuf *, const int);
  150 static int      sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
  151 static int      sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
  152 
  153 /*
  154  * Normative references: RFC 2710, RFC 3590, RFC 3810.
  155  *
  156  * Locking:
  157  *  * The MLD subsystem lock ends up being system-wide for the moment,
  158  *    but could be per-VIMAGE later on.
  159  *  * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
  160  *    Any may be taken independently; if any are held at the same
  161  *    time, the above lock order must be followed.
  162  *  * IN6_MULTI_LOCK covers in_multi.
  163  *  * MLD_LOCK covers per-link state and any global variables in this file.
  164  *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
  165  *    per-link state iterators.
  166  *
  167  *  XXX LOR PREVENTION
  168  *  A special case for IPv6 is the in6_setscope() routine. ip6_output()
  169  *  will not accept an ifp; it wants an embedded scope ID, unlike
  170  *  ip_output(), which happily takes the ifp given to it. The embedded
  171  *  scope ID is only used by MLD to select the outgoing interface.
  172  *
  173  *  During interface attach and detach, MLD will take MLD_LOCK *after*
  174  *  the IF_AFDATA_LOCK.
  175  *  As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
  176  *  it with MLD_LOCK held without triggering an LOR. A netisr with indirect
  177  *  dispatch could work around this, but we'd rather not do that, as it
  178  *  can introduce other races.
  179  *
  180  *  As such, we exploit the fact that the scope ID is just the interface
  181  *  index, and embed it in the IPv6 destination address accordingly.
  182  *  This is potentially NOT VALID for MLDv1 reports, as they
  183  *  are always sent to the multicast group itself; as MLDv2
  184  *  reports are always sent to ff02::16, this is not an issue
  185  *  when MLDv2 is in use.
  186  *
  187  *  This does not however eliminate the LOR when ip6_output() itself
  188  *  calls in6_setscope() internally whilst MLD_LOCK is held. This will
  189  *  trigger a LOR warning in WITNESS when the ifnet is detached.
  190  *
  191  *  The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
  192  *  how it's used across the network stack. Here we're simply exploiting
  193  *  the fact that MLD runs at a similar layer in the stack to scope6.c.
  194  *
  195  * VIMAGE:
  196  *  * Each in6_multi corresponds to an ifp, and each ifp corresponds
  197  *    to a vnet in ifp->if_vnet.
  198  */
  199 static struct mtx                mld_mtx;
  200 static MALLOC_DEFINE(M_MLD, "mld", "mld state");
  201 
  202 #define MLD_EMBEDSCOPE(pin6, zoneid)                                    \
  203         if (IN6_IS_SCOPE_LINKLOCAL(pin6) ||                             \
  204             IN6_IS_ADDR_MC_INTFACELOCAL(pin6))                          \
  205                 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)         \
  206 
  207 /*
  208  * VIMAGE-wide globals.
  209  */
  210 VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay) = {10, 0};
  211 VNET_DEFINE_STATIC(LIST_HEAD(, mld_ifsoftc), mli_head);
  212 VNET_DEFINE_STATIC(int, interface_timers_running6);
  213 VNET_DEFINE_STATIC(int, state_change_timers_running6);
  214 VNET_DEFINE_STATIC(int, current_state_timers_running6);
  215 
  216 #define V_mld_gsrdelay                  VNET(mld_gsrdelay)
  217 #define V_mli_head                      VNET(mli_head)
  218 #define V_interface_timers_running6     VNET(interface_timers_running6)
  219 #define V_state_change_timers_running6  VNET(state_change_timers_running6)
  220 #define V_current_state_timers_running6 VNET(current_state_timers_running6)
  221 
  222 SYSCTL_DECL(_net_inet6);        /* Note: Not in any common header. */
  223 
  224 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
  225     "IPv6 Multicast Listener Discovery");
  226 
  227 /*
  228  * Virtualized sysctls.
  229  */
  230 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
  231     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
  232     &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
  233     "Rate limit for MLDv2 Group-and-Source queries in seconds");
  234 
  235 /*
  236  * Non-virtualized sysctls.
  237  */
  238 static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
  239     CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
  240     "Per-interface MLDv2 state");
  241 
  242 static int      mld_v1enable = 1;
  243 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN,
  244     &mld_v1enable, 0, "Enable fallback to MLDv1");
  245 
  246 static int      mld_use_allow = 1;
  247 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
  248     &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
  249 
  250 /*
  251  * Packed Router Alert option structure declaration.
  252  */
  253 struct mld_raopt {
  254         struct ip6_hbh          hbh;
  255         struct ip6_opt          pad;
  256         struct ip6_opt_router   ra;
  257 } __packed;
  258 
  259 /*
  260  * Router Alert hop-by-hop option header.
  261  */
  262 static struct mld_raopt mld_ra = {
  263         .hbh = { 0, 0 },
  264         .pad = { .ip6o_type = IP6OPT_PADN, 0 },
  265         .ra = {
  266             .ip6or_type = IP6OPT_ROUTER_ALERT,
  267             .ip6or_len = IP6OPT_RTALERT_LEN - 2,
  268             .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
  269             .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
  270         }
  271 };
  272 static struct ip6_pktopts mld_po;
  273 
  274 static __inline void
  275 mld_save_context(struct mbuf *m, struct ifnet *ifp)
  276 {
  277 
  278 #ifdef VIMAGE
  279         m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
  280 #endif /* VIMAGE */
  281         m->m_pkthdr.flowid = ifp->if_index;
  282 }
  283 
  284 static __inline void
  285 mld_scrub_context(struct mbuf *m)
  286 {
  287 
  288         m->m_pkthdr.PH_loc.ptr = NULL;
  289         m->m_pkthdr.flowid = 0;
  290 }
  291 
  292 /*
  293  * Restore context from a queued output chain.
  294  * Return saved ifindex.
  295  *
  296  * VIMAGE: The assertion is there to make sure that we
  297  * actually called CURVNET_SET() with what's in the mbuf chain.
  298  */
  299 static __inline uint32_t
  300 mld_restore_context(struct mbuf *m)
  301 {
  302 
  303 #if defined(VIMAGE) && defined(INVARIANTS)
  304         KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
  305             ("%s: called when curvnet was not restored: cuvnet %p m ptr %p",
  306             __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
  307 #endif
  308         return (m->m_pkthdr.flowid);
  309 }
  310 
  311 /*
  312  * Retrieve or set threshold between group-source queries in seconds.
  313  *
  314  * VIMAGE: Assume curvnet set by caller.
  315  * SMPng: NOTE: Serialized by MLD lock.
  316  */
  317 static int
  318 sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
  319 {
  320         int error;
  321         int i;
  322 
  323         error = sysctl_wire_old_buffer(req, sizeof(int));
  324         if (error)
  325                 return (error);
  326 
  327         MLD_LOCK();
  328 
  329         i = V_mld_gsrdelay.tv_sec;
  330 
  331         error = sysctl_handle_int(oidp, &i, 0, req);
  332         if (error || !req->newptr)
  333                 goto out_locked;
  334 
  335         if (i < -1 || i >= 60) {
  336                 error = EINVAL;
  337                 goto out_locked;
  338         }
  339 
  340         CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
  341              V_mld_gsrdelay.tv_sec, i);
  342         V_mld_gsrdelay.tv_sec = i;
  343 
  344 out_locked:
  345         MLD_UNLOCK();
  346         return (error);
  347 }
  348 
  349 /*
  350  * Expose struct mld_ifsoftc to userland, keyed by ifindex.
  351  * For use by ifmcstat(8).
  352  *
  353  * SMPng: NOTE: Does an unlocked ifindex space read.
  354  * VIMAGE: Assume curvnet set by caller. The node handler itself
  355  * is not directly virtualized.
  356  */
  357 static int
  358 sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
  359 {
  360         int                     *name;
  361         int                      error;
  362         u_int                    namelen;
  363         struct ifnet            *ifp;
  364         struct mld_ifsoftc      *mli;
  365 
  366         name = (int *)arg1;
  367         namelen = arg2;
  368 
  369         if (req->newptr != NULL)
  370                 return (EPERM);
  371 
  372         if (namelen != 1)
  373                 return (EINVAL);
  374 
  375         error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
  376         if (error)
  377                 return (error);
  378 
  379         IN6_MULTI_LOCK();
  380         IN6_MULTI_LIST_LOCK();
  381         MLD_LOCK();
  382 
  383         if (name[0] <= 0 || name[0] > V_if_index) {
  384                 error = ENOENT;
  385                 goto out_locked;
  386         }
  387 
  388         error = ENOENT;
  389 
  390         ifp = ifnet_byindex(name[0]);
  391         if (ifp == NULL)
  392                 goto out_locked;
  393 
  394         LIST_FOREACH(mli, &V_mli_head, mli_link) {
  395                 if (ifp == mli->mli_ifp) {
  396                         struct mld_ifinfo info;
  397 
  398                         info.mli_version = mli->mli_version;
  399                         info.mli_v1_timer = mli->mli_v1_timer;
  400                         info.mli_v2_timer = mli->mli_v2_timer;
  401                         info.mli_flags = mli->mli_flags;
  402                         info.mli_rv = mli->mli_rv;
  403                         info.mli_qi = mli->mli_qi;
  404                         info.mli_qri = mli->mli_qri;
  405                         info.mli_uri = mli->mli_uri;
  406                         error = SYSCTL_OUT(req, &info, sizeof(info));
  407                         break;
  408                 }
  409         }
  410 
  411 out_locked:
  412         MLD_UNLOCK();
  413         IN6_MULTI_LIST_UNLOCK();
  414         IN6_MULTI_UNLOCK();
  415         return (error);
  416 }
  417 
  418 /*
  419  * Dispatch an entire queue of pending packet chains.
  420  * VIMAGE: Assumes the vnet pointer has been set.
  421  */
  422 static void
  423 mld_dispatch_queue(struct mbufq *mq, int limit)
  424 {
  425         struct mbuf *m;
  426 
  427         while ((m = mbufq_dequeue(mq)) != NULL) {
  428                 CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, mq, m);
  429                 mld_dispatch_packet(m);
  430                 if (--limit == 0)
  431                         break;
  432         }
  433 }
  434 
  435 /*
  436  * Filter outgoing MLD report state by group.
  437  *
  438  * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
  439  * and node-local addresses. However, kernel and socket consumers
  440  * always embed the KAME scope ID in the address provided, so strip it
  441  * when performing comparison.
  442  * Note: This is not the same as the *multicast* scope.
  443  *
  444  * Return zero if the given group is one for which MLD reports
  445  * should be suppressed, or non-zero if reports should be issued.
  446  */
  447 static __inline int
  448 mld_is_addr_reported(const struct in6_addr *addr)
  449 {
  450 
  451         KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
  452 
  453         if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
  454                 return (0);
  455 
  456         if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
  457                 struct in6_addr tmp = *addr;
  458                 in6_clearscope(&tmp);
  459                 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
  460                         return (0);
  461         }
  462 
  463         return (1);
  464 }
  465 
  466 /*
  467  * Attach MLD when PF_INET6 is attached to an interface.
  468  *
  469  * SMPng: Normally called with IF_AFDATA_LOCK held.
  470  */
  471 struct mld_ifsoftc *
  472 mld_domifattach(struct ifnet *ifp)
  473 {
  474         struct mld_ifsoftc *mli;
  475 
  476         CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
  477             __func__, ifp, if_name(ifp));
  478 
  479         MLD_LOCK();
  480 
  481         mli = mli_alloc_locked(ifp);
  482         if (!(ifp->if_flags & IFF_MULTICAST))
  483                 mli->mli_flags |= MLIF_SILENT;
  484         if (mld_use_allow)
  485                 mli->mli_flags |= MLIF_USEALLOW;
  486 
  487         MLD_UNLOCK();
  488 
  489         return (mli);
  490 }
  491 
  492 /*
  493  * VIMAGE: assume curvnet set by caller.
  494  */
  495 static struct mld_ifsoftc *
  496 mli_alloc_locked(/*const*/ struct ifnet *ifp)
  497 {
  498         struct mld_ifsoftc *mli;
  499 
  500         MLD_LOCK_ASSERT();
  501 
  502         mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
  503         if (mli == NULL)
  504                 goto out;
  505 
  506         mli->mli_ifp = ifp;
  507         mli->mli_version = MLD_VERSION_2;
  508         mli->mli_flags = 0;
  509         mli->mli_rv = MLD_RV_INIT;
  510         mli->mli_qi = MLD_QI_INIT;
  511         mli->mli_qri = MLD_QRI_INIT;
  512         mli->mli_uri = MLD_URI_INIT;
  513         mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
  514 
  515         LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
  516 
  517         CTR2(KTR_MLD, "allocate mld_ifsoftc for ifp %p(%s)",
  518              ifp, if_name(ifp));
  519 
  520 out:
  521         return (mli);
  522 }
  523 
  524 /*
  525  * Hook for ifdetach.
  526  *
  527  * NOTE: Some finalization tasks need to run before the protocol domain
  528  * is detached, but also before the link layer does its cleanup.
  529  * Run before link-layer cleanup; cleanup groups, but do not free MLD state.
  530  *
  531  * SMPng: Caller must hold IN6_MULTI_LOCK().
  532  * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
  533  * XXX This routine is also bitten by unlocked ifma_protospec access.
  534  */
  535 void
  536 mld_ifdetach(struct ifnet *ifp)
  537 {
  538         struct mld_ifsoftc      *mli;
  539         struct ifmultiaddr      *ifma, *next;
  540         struct in6_multi        *inm;
  541         struct in6_multi_head inmh;
  542 
  543         CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
  544             if_name(ifp));
  545 
  546         SLIST_INIT(&inmh);
  547         IN6_MULTI_LIST_LOCK_ASSERT();
  548         MLD_LOCK();
  549 
  550         mli = MLD_IFINFO(ifp);
  551         if (mli->mli_version == MLD_VERSION_2) {
  552                 IF_ADDR_WLOCK(ifp);
  553         restart:
  554                 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
  555                         if (ifma->ifma_addr->sa_family != AF_INET6 ||
  556                             ifma->ifma_protospec == NULL)
  557                                 continue;
  558                         inm = (struct in6_multi *)ifma->ifma_protospec;
  559                         if (inm->in6m_state == MLD_LEAVING_MEMBER) {
  560                                 in6m_disconnect(inm);
  561                                 in6m_rele_locked(&inmh, inm);
  562                                 ifma->ifma_protospec = NULL;
  563                         }
  564                         in6m_clear_recorded(inm);
  565                         if (__predict_false(ifma6_restart)) {
  566                                 ifma6_restart = false;
  567                                 goto restart;
  568                         }
  569                 }
  570                 IF_ADDR_WUNLOCK(ifp);
  571         }
  572 
  573         MLD_UNLOCK();
  574         in6m_release_list_deferred(&inmh);
  575 }
  576 
  577 /*
  578  * Hook for domifdetach.
  579  * Runs after link-layer cleanup; free MLD state.
  580  *
  581  * SMPng: Normally called with IF_AFDATA_LOCK held.
  582  */
  583 void
  584 mld_domifdetach(struct ifnet *ifp)
  585 {
  586 
  587         CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
  588             __func__, ifp, if_name(ifp));
  589 
  590         MLD_LOCK();
  591         mli_delete_locked(ifp);
  592         MLD_UNLOCK();
  593 }
  594 
  595 static void
  596 mli_delete_locked(const struct ifnet *ifp)
  597 {
  598         struct mld_ifsoftc *mli, *tmli;
  599 
  600         CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
  601             __func__, ifp, if_name(ifp));
  602 
  603         MLD_LOCK_ASSERT();
  604 
  605         LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
  606                 if (mli->mli_ifp == ifp) {
  607                         /*
  608                          * Free deferred General Query responses.
  609                          */
  610                         mbufq_drain(&mli->mli_gq);
  611 
  612                         LIST_REMOVE(mli, mli_link);
  613 
  614                         free(mli, M_MLD);
  615                         return;
  616                 }
  617         }
  618 }
  619 
  620 /*
  621  * Process a received MLDv1 general or address-specific query.
  622  * Assumes that the query header has been pulled up to sizeof(mld_hdr).
  623  *
  624  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
  625  * mld_addr. This is OK as we own the mbuf chain.
  626  */
  627 static int
  628 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
  629     /*const*/ struct mld_hdr *mld)
  630 {
  631         struct ifmultiaddr      *ifma;
  632         struct mld_ifsoftc      *mli;
  633         struct in6_multi        *inm;
  634         int                      is_general_query;
  635         uint16_t                 timer;
  636 #ifdef KTR
  637         char                     ip6tbuf[INET6_ADDRSTRLEN];
  638 #endif
  639 
  640         is_general_query = 0;
  641 
  642         if (!mld_v1enable) {
  643                 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
  644                     ip6_sprintf(ip6tbuf, &mld->mld_addr),
  645                     ifp, if_name(ifp));
  646                 return (0);
  647         }
  648 
  649         /*
  650          * RFC3810 Section 6.2: MLD queries must originate from
  651          * a router's link-local address.
  652          */
  653         if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
  654                 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
  655                     ip6_sprintf(ip6tbuf, &ip6->ip6_src),
  656                     ifp, if_name(ifp));
  657                 return (0);
  658         }
  659 
  660         /*
  661          * Do address field validation upfront before we accept
  662          * the query.
  663          */
  664         if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
  665                 /*
  666                  * MLDv1 General Query.
  667                  * If this was not sent to the all-nodes group, ignore it.
  668                  */
  669                 struct in6_addr          dst;
  670 
  671                 dst = ip6->ip6_dst;
  672                 in6_clearscope(&dst);
  673                 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
  674                         return (EINVAL);
  675                 is_general_query = 1;
  676         } else {
  677                 /*
  678                  * Embed scope ID of receiving interface in MLD query for
  679                  * lookup whilst we don't hold other locks.
  680                  */
  681                 in6_setscope(&mld->mld_addr, ifp, NULL);
  682         }
  683 
  684         IN6_MULTI_LIST_LOCK();
  685         MLD_LOCK();
  686 
  687         /*
  688          * Switch to MLDv1 host compatibility mode.
  689          */
  690         mli = MLD_IFINFO(ifp);
  691         KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
  692         mld_set_version(mli, MLD_VERSION_1);
  693 
  694         timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
  695         if (timer == 0)
  696                 timer = 1;
  697 
  698         IF_ADDR_RLOCK(ifp);
  699         if (is_general_query) {
  700                 /*
  701                  * For each reporting group joined on this
  702                  * interface, kick the report timer.
  703                  */
  704                 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
  705                          ifp, if_name(ifp));
  706                 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  707                         if (ifma->ifma_addr->sa_family != AF_INET6 ||
  708                             ifma->ifma_protospec == NULL)
  709                                 continue;
  710                         inm = (struct in6_multi *)ifma->ifma_protospec;
  711                         mld_v1_update_group(inm, timer);
  712                 }
  713         } else {
  714                 /*
  715                  * MLDv1 Group-Specific Query.
  716                  * If this is a group-specific MLDv1 query, we need only
  717                  * look up the single group to process it.
  718                  */
  719                 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
  720                 if (inm != NULL) {
  721                         CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
  722                             ip6_sprintf(ip6tbuf, &mld->mld_addr),
  723                             ifp, if_name(ifp));
  724                         mld_v1_update_group(inm, timer);
  725                 }
  726                 /* XXX Clear embedded scope ID as userland won't expect it. */
  727                 in6_clearscope(&mld->mld_addr);
  728         }
  729 
  730         IF_ADDR_RUNLOCK(ifp);
  731         MLD_UNLOCK();
  732         IN6_MULTI_LIST_UNLOCK();
  733 
  734         return (0);
  735 }
  736 
  737 /*
  738  * Update the report timer on a group in response to an MLDv1 query.
  739  *
  740  * If we are becoming the reporting member for this group, start the timer.
  741  * If we already are the reporting member for this group, and timer is
  742  * below the threshold, reset it.
  743  *
  744  * We may be updating the group for the first time since we switched
  745  * to MLDv2. If we are, then we must clear any recorded source lists,
  746  * and transition to REPORTING state; the group timer is overloaded
  747  * for group and group-source query responses. 
  748  *
  749  * Unlike MLDv2, the delay per group should be jittered
  750  * to avoid bursts of MLDv1 reports.
  751  */
  752 static void
  753 mld_v1_update_group(struct in6_multi *inm, const int timer)
  754 {
  755 #ifdef KTR
  756         char                     ip6tbuf[INET6_ADDRSTRLEN];
  757 #endif
  758 
  759         CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
  760             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
  761             if_name(inm->in6m_ifp), timer);
  762 
  763         IN6_MULTI_LIST_LOCK_ASSERT();
  764 
  765         switch (inm->in6m_state) {
  766         case MLD_NOT_MEMBER:
  767         case MLD_SILENT_MEMBER:
  768                 break;
  769         case MLD_REPORTING_MEMBER:
  770                 if (inm->in6m_timer != 0 &&
  771                     inm->in6m_timer <= timer) {
  772                         CTR1(KTR_MLD, "%s: REPORTING and timer running, "
  773                             "skipping.", __func__);
  774                         break;
  775                 }
  776                 /* FALLTHROUGH */
  777         case MLD_SG_QUERY_PENDING_MEMBER:
  778         case MLD_G_QUERY_PENDING_MEMBER:
  779         case MLD_IDLE_MEMBER:
  780         case MLD_LAZY_MEMBER:
  781         case MLD_AWAKENING_MEMBER:
  782                 CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
  783                 inm->in6m_state = MLD_REPORTING_MEMBER;
  784                 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
  785                 V_current_state_timers_running6 = 1;
  786                 break;
  787         case MLD_SLEEPING_MEMBER:
  788                 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
  789                 inm->in6m_state = MLD_AWAKENING_MEMBER;
  790                 break;
  791         case MLD_LEAVING_MEMBER:
  792                 break;
  793         }
  794 }
  795 
  796 /*
  797  * Process a received MLDv2 general, group-specific or
  798  * group-and-source-specific query.
  799  *
  800  * Assumes that the query header has been pulled up to sizeof(mldv2_query).
  801  *
  802  * Return 0 if successful, otherwise an appropriate error code is returned.
  803  */
  804 static int
  805 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
  806     struct mbuf *m, const int off, const int icmp6len)
  807 {
  808         struct mld_ifsoftc      *mli;
  809         struct mldv2_query      *mld;
  810         struct in6_multi        *inm;
  811         uint32_t                 maxdelay, nsrc, qqi;
  812         int                      is_general_query;
  813         uint16_t                 timer;
  814         uint8_t                  qrv;
  815 #ifdef KTR
  816         char                     ip6tbuf[INET6_ADDRSTRLEN];
  817 #endif
  818 
  819         is_general_query = 0;
  820 
  821         /*
  822          * RFC3810 Section 6.2: MLD queries must originate from
  823          * a router's link-local address.
  824          */
  825         if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
  826                 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
  827                     ip6_sprintf(ip6tbuf, &ip6->ip6_src),
  828                     ifp, if_name(ifp));
  829                 return (0);
  830         }
  831 
  832         CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
  833 
  834         mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
  835 
  836         maxdelay = ntohs(mld->mld_maxdelay);    /* in 1/10ths of a second */
  837         if (maxdelay >= 32768) {
  838                 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
  839                            (MLD_MRC_EXP(maxdelay) + 3);
  840         }
  841         timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
  842         if (timer == 0)
  843                 timer = 1;
  844 
  845         qrv = MLD_QRV(mld->mld_misc);
  846         if (qrv < 2) {
  847                 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
  848                     qrv, MLD_RV_INIT);
  849                 qrv = MLD_RV_INIT;
  850         }
  851 
  852         qqi = mld->mld_qqi;
  853         if (qqi >= 128) {
  854                 qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
  855                      (MLD_QQIC_EXP(mld->mld_qqi) + 3);
  856         }
  857 
  858         nsrc = ntohs(mld->mld_numsrc);
  859         if (nsrc > MLD_MAX_GS_SOURCES)
  860                 return (EMSGSIZE);
  861         if (icmp6len < sizeof(struct mldv2_query) +
  862             (nsrc * sizeof(struct in6_addr)))
  863                 return (EMSGSIZE);
  864 
  865         /*
  866          * Do further input validation upfront to avoid resetting timers
  867          * should we need to discard this query.
  868          */
  869         if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
  870                 /*
  871                  * A general query with a source list has undefined
  872                  * behaviour; discard it.
  873                  */
  874                 if (nsrc > 0)
  875                         return (EINVAL);
  876                 is_general_query = 1;
  877         } else {
  878                 /*
  879                  * Embed scope ID of receiving interface in MLD query for
  880                  * lookup whilst we don't hold other locks (due to KAME
  881                  * locking lameness). We own this mbuf chain just now.
  882                  */
  883                 in6_setscope(&mld->mld_addr, ifp, NULL);
  884         }
  885 
  886         IN6_MULTI_LIST_LOCK();
  887         MLD_LOCK();
  888 
  889         mli = MLD_IFINFO(ifp);
  890         KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
  891 
  892         /*
  893          * Discard the v2 query if we're in Compatibility Mode.
  894          * The RFC is pretty clear that hosts need to stay in MLDv1 mode
  895          * until the Old Version Querier Present timer expires.
  896          */
  897         if (mli->mli_version != MLD_VERSION_2)
  898                 goto out_locked;
  899 
  900         mld_set_version(mli, MLD_VERSION_2);
  901         mli->mli_rv = qrv;
  902         mli->mli_qi = qqi;
  903         mli->mli_qri = maxdelay;
  904 
  905         CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
  906             maxdelay);
  907 
  908         if (is_general_query) {
  909                 /*
  910                  * MLDv2 General Query.
  911                  *
  912                  * Schedule a current-state report on this ifp for
  913                  * all groups, possibly containing source lists.
  914                  *
  915                  * If there is a pending General Query response
  916                  * scheduled earlier than the selected delay, do
  917                  * not schedule any other reports.
  918                  * Otherwise, reset the interface timer.
  919                  */
  920                 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
  921                     ifp, if_name(ifp));
  922                 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
  923                         mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
  924                         V_interface_timers_running6 = 1;
  925                 }
  926         } else {
  927                 /*
  928                  * MLDv2 Group-specific or Group-and-source-specific Query.
  929                  *
  930                  * Group-source-specific queries are throttled on
  931                  * a per-group basis to defeat denial-of-service attempts.
  932                  * Queries for groups we are not a member of on this
  933                  * link are simply ignored.
  934                  */
  935                 IF_ADDR_RLOCK(ifp);
  936                 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
  937                 if (inm == NULL) {
  938                         IF_ADDR_RUNLOCK(ifp);
  939                         goto out_locked;
  940                 }
  941                 if (nsrc > 0) {
  942                         if (!ratecheck(&inm->in6m_lastgsrtv,
  943                             &V_mld_gsrdelay)) {
  944                                 CTR1(KTR_MLD, "%s: GS query throttled.",
  945                                     __func__);
  946                                 IF_ADDR_RUNLOCK(ifp);
  947                                 goto out_locked;
  948                         }
  949                 }
  950                 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
  951                      ifp, if_name(ifp));
  952                 /*
  953                  * If there is a pending General Query response
  954                  * scheduled sooner than the selected delay, no
  955                  * further report need be scheduled.
  956                  * Otherwise, prepare to respond to the
  957                  * group-specific or group-and-source query.
  958                  */
  959                 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
  960                         mld_v2_process_group_query(inm, mli, timer, m, off);
  961 
  962                 /* XXX Clear embedded scope ID as userland won't expect it. */
  963                 in6_clearscope(&mld->mld_addr);
  964                 IF_ADDR_RUNLOCK(ifp);
  965         }
  966 
  967 out_locked:
  968         MLD_UNLOCK();
  969         IN6_MULTI_LIST_UNLOCK();
  970 
  971         return (0);
  972 }
  973 
  974 /*
  975  * Process a received MLDv2 group-specific or group-and-source-specific
  976  * query.
  977  * Return <0 if any error occurred. Currently this is ignored.
  978  */
  979 static int
  980 mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
  981     int timer, struct mbuf *m0, const int off)
  982 {
  983         struct mldv2_query      *mld;
  984         int                      retval;
  985         uint16_t                 nsrc;
  986 
  987         IN6_MULTI_LIST_LOCK_ASSERT();
  988         MLD_LOCK_ASSERT();
  989 
  990         retval = 0;
  991         mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
  992 
  993         switch (inm->in6m_state) {
  994         case MLD_NOT_MEMBER:
  995         case MLD_SILENT_MEMBER:
  996         case MLD_SLEEPING_MEMBER:
  997         case MLD_LAZY_MEMBER:
  998         case MLD_AWAKENING_MEMBER:
  999         case MLD_IDLE_MEMBER:
 1000         case MLD_LEAVING_MEMBER:
 1001                 return (retval);
 1002                 break;
 1003         case MLD_REPORTING_MEMBER:
 1004         case MLD_G_QUERY_PENDING_MEMBER:
 1005         case MLD_SG_QUERY_PENDING_MEMBER:
 1006                 break;
 1007         }
 1008 
 1009         nsrc = ntohs(mld->mld_numsrc);
 1010 
 1011         /*
 1012          * Deal with group-specific queries upfront.
 1013          * If any group query is already pending, purge any recorded
 1014          * source-list state if it exists, and schedule a query response
 1015          * for this group-specific query.
 1016          */
 1017         if (nsrc == 0) {
 1018                 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
 1019                     inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
 1020                         in6m_clear_recorded(inm);
 1021                         timer = min(inm->in6m_timer, timer);
 1022                 }
 1023                 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
 1024                 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
 1025                 V_current_state_timers_running6 = 1;
 1026                 return (retval);
 1027         }
 1028 
 1029         /*
 1030          * Deal with the case where a group-and-source-specific query has
 1031          * been received but a group-specific query is already pending.
 1032          */
 1033         if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
 1034                 timer = min(inm->in6m_timer, timer);
 1035                 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
 1036                 V_current_state_timers_running6 = 1;
 1037                 return (retval);
 1038         }
 1039 
 1040         /*
 1041          * Finally, deal with the case where a group-and-source-specific
 1042          * query has been received, where a response to a previous g-s-r
 1043          * query exists, or none exists.
 1044          * In this case, we need to parse the source-list which the Querier
 1045          * has provided us with and check if we have any source list filter
 1046          * entries at T1 for these sources. If we do not, there is no need
 1047          * schedule a report and the query may be dropped.
 1048          * If we do, we must record them and schedule a current-state
 1049          * report for those sources.
 1050          */
 1051         if (inm->in6m_nsrc > 0) {
 1052                 struct mbuf             *m;
 1053                 uint8_t                 *sp;
 1054                 int                      i, nrecorded;
 1055                 int                      soff;
 1056 
 1057                 m = m0;
 1058                 soff = off + sizeof(struct mldv2_query);
 1059                 nrecorded = 0;
 1060                 for (i = 0; i < nsrc; i++) {
 1061                         sp = mtod(m, uint8_t *) + soff;
 1062                         retval = in6m_record_source(inm,
 1063                             (const struct in6_addr *)sp);
 1064                         if (retval < 0)
 1065                                 break;
 1066                         nrecorded += retval;
 1067                         soff += sizeof(struct in6_addr);
 1068                         if (soff >= m->m_len) {
 1069                                 soff = soff - m->m_len;
 1070                                 m = m->m_next;
 1071                                 if (m == NULL)
 1072                                         break;
 1073                         }
 1074                 }
 1075                 if (nrecorded > 0) {
 1076                         CTR1(KTR_MLD,
 1077                             "%s: schedule response to SG query", __func__);
 1078                         inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
 1079                         inm->in6m_timer = MLD_RANDOM_DELAY(timer);
 1080                         V_current_state_timers_running6 = 1;
 1081                 }
 1082         }
 1083 
 1084         return (retval);
 1085 }
 1086 
 1087 /*
 1088  * Process a received MLDv1 host membership report.
 1089  * Assumes mld points to mld_hdr in pulled up mbuf chain.
 1090  *
 1091  * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 1092  * mld_addr. This is OK as we own the mbuf chain.
 1093  */
 1094 static int
 1095 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
 1096     /*const*/ struct mld_hdr *mld)
 1097 {
 1098         struct in6_addr          src, dst;
 1099         struct in6_ifaddr       *ia;
 1100         struct in6_multi        *inm;
 1101 #ifdef KTR
 1102         char                     ip6tbuf[INET6_ADDRSTRLEN];
 1103 #endif
 1104 
 1105         if (!mld_v1enable) {
 1106                 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
 1107                     ip6_sprintf(ip6tbuf, &mld->mld_addr),
 1108                     ifp, if_name(ifp));
 1109                 return (0);
 1110         }
 1111 
 1112         if (ifp->if_flags & IFF_LOOPBACK)
 1113                 return (0);
 1114 
 1115         /*
 1116          * MLDv1 reports must originate from a host's link-local address,
 1117          * or the unspecified address (when booting).
 1118          */
 1119         src = ip6->ip6_src;
 1120         in6_clearscope(&src);
 1121         if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
 1122                 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
 1123                     ip6_sprintf(ip6tbuf, &ip6->ip6_src),
 1124                     ifp, if_name(ifp));
 1125                 return (EINVAL);
 1126         }
 1127 
 1128         /*
 1129          * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
 1130          * group, and must be directed to the group itself.
 1131          */
 1132         dst = ip6->ip6_dst;
 1133         in6_clearscope(&dst);
 1134         if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
 1135             !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
 1136                 CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)",
 1137                     ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
 1138                     ifp, if_name(ifp));
 1139                 return (EINVAL);
 1140         }
 1141 
 1142         /*
 1143          * Make sure we don't hear our own membership report, as fast
 1144          * leave requires knowing that we are the only member of a
 1145          * group. Assume we used the link-local address if available,
 1146          * otherwise look for ::.
 1147          *
 1148          * XXX Note that scope ID comparison is needed for the address
 1149          * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
 1150          * performed for the on-wire address.
 1151          */
 1152         ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
 1153         if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
 1154             (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
 1155                 if (ia != NULL)
 1156                         ifa_free(&ia->ia_ifa);
 1157                 return (0);
 1158         }
 1159         if (ia != NULL)
 1160                 ifa_free(&ia->ia_ifa);
 1161 
 1162         CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
 1163             ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
 1164 
 1165         /*
 1166          * Embed scope ID of receiving interface in MLD query for lookup
 1167          * whilst we don't hold other locks (due to KAME locking lameness).
 1168          */
 1169         if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
 1170                 in6_setscope(&mld->mld_addr, ifp, NULL);
 1171 
 1172         IN6_MULTI_LIST_LOCK();
 1173         MLD_LOCK();
 1174         IF_ADDR_RLOCK(ifp);
 1175 
 1176         /*
 1177          * MLDv1 report suppression.
 1178          * If we are a member of this group, and our membership should be
 1179          * reported, and our group timer is pending or about to be reset,
 1180          * stop our group timer by transitioning to the 'lazy' state.
 1181          */
 1182         inm = in6m_lookup_locked(ifp, &mld->mld_addr);
 1183         if (inm != NULL) {
 1184                 struct mld_ifsoftc *mli;
 1185 
 1186                 mli = inm->in6m_mli;
 1187                 KASSERT(mli != NULL,
 1188                     ("%s: no mli for ifp %p", __func__, ifp));
 1189 
 1190                 /*
 1191                  * If we are in MLDv2 host mode, do not allow the
 1192                  * other host's MLDv1 report to suppress our reports.
 1193                  */
 1194                 if (mli->mli_version == MLD_VERSION_2)
 1195                         goto out_locked;
 1196 
 1197                 inm->in6m_timer = 0;
 1198 
 1199                 switch (inm->in6m_state) {
 1200                 case MLD_NOT_MEMBER:
 1201                 case MLD_SILENT_MEMBER:
 1202                 case MLD_SLEEPING_MEMBER:
 1203                         break;
 1204                 case MLD_REPORTING_MEMBER:
 1205                 case MLD_IDLE_MEMBER:
 1206                 case MLD_AWAKENING_MEMBER:
 1207                         CTR3(KTR_MLD,
 1208                             "report suppressed for %s on ifp %p(%s)",
 1209                             ip6_sprintf(ip6tbuf, &mld->mld_addr),
 1210                             ifp, if_name(ifp));
 1211                 case MLD_LAZY_MEMBER:
 1212                         inm->in6m_state = MLD_LAZY_MEMBER;
 1213                         break;
 1214                 case MLD_G_QUERY_PENDING_MEMBER:
 1215                 case MLD_SG_QUERY_PENDING_MEMBER:
 1216                 case MLD_LEAVING_MEMBER:
 1217                         break;
 1218                 }
 1219         }
 1220 
 1221 out_locked:
 1222         IF_ADDR_RUNLOCK(ifp);
 1223         MLD_UNLOCK();
 1224         IN6_MULTI_LIST_UNLOCK();
 1225 
 1226         /* XXX Clear embedded scope ID as userland won't expect it. */
 1227         in6_clearscope(&mld->mld_addr);
 1228 
 1229         return (0);
 1230 }
 1231 
 1232 /*
 1233  * MLD input path.
 1234  *
 1235  * Assume query messages which fit in a single ICMPv6 message header
 1236  * have been pulled up.
 1237  * Assume that userland will want to see the message, even if it
 1238  * otherwise fails kernel input validation; do not free it.
 1239  * Pullup may however free the mbuf chain m if it fails.
 1240  *
 1241  * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
 1242  */
 1243 int
 1244 mld_input(struct mbuf *m, int off, int icmp6len)
 1245 {
 1246         struct ifnet    *ifp;
 1247         struct ip6_hdr  *ip6;
 1248         struct mld_hdr  *mld;
 1249         int              mldlen;
 1250 
 1251         CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
 1252 
 1253         ifp = m->m_pkthdr.rcvif;
 1254 
 1255         ip6 = mtod(m, struct ip6_hdr *);
 1256 
 1257         /* Pullup to appropriate size. */
 1258         mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
 1259         if (mld->mld_type == MLD_LISTENER_QUERY &&
 1260             icmp6len >= sizeof(struct mldv2_query)) {
 1261                 mldlen = sizeof(struct mldv2_query);
 1262         } else {
 1263                 mldlen = sizeof(struct mld_hdr);
 1264         }
 1265         IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
 1266         if (mld == NULL) {
 1267                 ICMP6STAT_INC(icp6s_badlen);
 1268                 return (IPPROTO_DONE);
 1269         }
 1270 
 1271         /*
 1272          * Userland needs to see all of this traffic for implementing
 1273          * the endpoint discovery portion of multicast routing.
 1274          */
 1275         switch (mld->mld_type) {
 1276         case MLD_LISTENER_QUERY:
 1277                 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
 1278                 if (icmp6len == sizeof(struct mld_hdr)) {
 1279                         if (mld_v1_input_query(ifp, ip6, mld) != 0)
 1280                                 return (0);
 1281                 } else if (icmp6len >= sizeof(struct mldv2_query)) {
 1282                         if (mld_v2_input_query(ifp, ip6, m, off,
 1283                             icmp6len) != 0)
 1284                                 return (0);
 1285                 }
 1286                 break;
 1287         case MLD_LISTENER_REPORT:
 1288                 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
 1289                 if (mld_v1_input_report(ifp, ip6, mld) != 0)
 1290                         return (0);
 1291                 break;
 1292         case MLDV2_LISTENER_REPORT:
 1293                 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
 1294                 break;
 1295         case MLD_LISTENER_DONE:
 1296                 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
 1297                 break;
 1298         default:
 1299                 break;
 1300         }
 1301 
 1302         return (0);
 1303 }
 1304 
 1305 /*
 1306  * Fast timeout handler (global).
 1307  * VIMAGE: Timeout handlers are expected to service all vimages.
 1308  */
 1309 void
 1310 mld_fasttimo(void)
 1311 {
 1312         VNET_ITERATOR_DECL(vnet_iter);
 1313 
 1314         VNET_LIST_RLOCK_NOSLEEP();
 1315         VNET_FOREACH(vnet_iter) {
 1316                 CURVNET_SET(vnet_iter);
 1317                 mld_fasttimo_vnet();
 1318                 CURVNET_RESTORE();
 1319         }
 1320         VNET_LIST_RUNLOCK_NOSLEEP();
 1321 }
 1322 
 1323 /*
 1324  * Fast timeout handler (per-vnet).
 1325  *
 1326  * VIMAGE: Assume caller has set up our curvnet.
 1327  */
 1328 static void
 1329 mld_fasttimo_vnet(void)
 1330 {
 1331         struct mbufq             scq;   /* State-change packets */
 1332         struct mbufq             qrq;   /* Query response packets */
 1333         struct ifnet            *ifp;
 1334         struct mld_ifsoftc      *mli;
 1335         struct ifmultiaddr      *ifma, *next;
 1336         struct in6_multi        *inm, *tinm;
 1337         struct in6_multi_head inmh;
 1338         int                      uri_fasthz;
 1339 
 1340         uri_fasthz = 0;
 1341 
 1342         /*
 1343          * Quick check to see if any work needs to be done, in order to
 1344          * minimize the overhead of fasttimo processing.
 1345          * SMPng: XXX Unlocked reads.
 1346          */
 1347         if (!V_current_state_timers_running6 &&
 1348             !V_interface_timers_running6 &&
 1349             !V_state_change_timers_running6)
 1350                 return;
 1351 
 1352         SLIST_INIT(&inmh);
 1353         IN6_MULTI_LIST_LOCK();
 1354         MLD_LOCK();
 1355 
 1356         /*
 1357          * MLDv2 General Query response timer processing.
 1358          */
 1359         if (V_interface_timers_running6) {
 1360                 CTR1(KTR_MLD, "%s: interface timers running", __func__);
 1361 
 1362                 V_interface_timers_running6 = 0;
 1363                 LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1364                         if (mli->mli_v2_timer == 0) {
 1365                                 /* Do nothing. */
 1366                         } else if (--mli->mli_v2_timer == 0) {
 1367                                 mld_v2_dispatch_general_query(mli);
 1368                         } else {
 1369                                 V_interface_timers_running6 = 1;
 1370                         }
 1371                 }
 1372         }
 1373 
 1374         if (!V_current_state_timers_running6 &&
 1375             !V_state_change_timers_running6)
 1376                 goto out_locked;
 1377 
 1378         V_current_state_timers_running6 = 0;
 1379         V_state_change_timers_running6 = 0;
 1380 
 1381         CTR1(KTR_MLD, "%s: state change timers running", __func__);
 1382 
 1383         /*
 1384          * MLD host report and state-change timer processing.
 1385          * Note: Processing a v2 group timer may remove a node.
 1386          */
 1387         LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1388                 ifp = mli->mli_ifp;
 1389 
 1390                 if (mli->mli_version == MLD_VERSION_2) {
 1391                         uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
 1392                             PR_FASTHZ);
 1393                         mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
 1394                         mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
 1395                 }
 1396 
 1397                 IF_ADDR_WLOCK(ifp);
 1398         restart:
 1399                 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
 1400                         if (ifma->ifma_addr->sa_family != AF_INET6 ||
 1401                             ifma->ifma_protospec == NULL)
 1402                                 continue;
 1403                         inm = (struct in6_multi *)ifma->ifma_protospec;
 1404                         switch (mli->mli_version) {
 1405                         case MLD_VERSION_1:
 1406                                 mld_v1_process_group_timer(&inmh, inm);
 1407                                 break;
 1408                         case MLD_VERSION_2:
 1409                                 mld_v2_process_group_timers(&inmh, &qrq,
 1410                                     &scq, inm, uri_fasthz);
 1411                                 break;
 1412                         }
 1413                         if (__predict_false(ifma6_restart)) {
 1414                                 ifma6_restart = false;
 1415                                 goto restart;
 1416                         }
 1417                 }
 1418                 IF_ADDR_WUNLOCK(ifp);
 1419 
 1420                 switch (mli->mli_version) {
 1421                 case MLD_VERSION_1:
 1422                         /*
 1423                          * Transmit reports for this lifecycle.  This
 1424                          * is done while not holding IF_ADDR_LOCK
 1425                          * since this can call
 1426                          * in6ifa_ifpforlinklocal() which locks
 1427                          * IF_ADDR_LOCK internally as well as
 1428                          * ip6_output() to transmit a packet.
 1429                          */
 1430                         SLIST_FOREACH_SAFE(inm, &inmh, in6m_nrele, tinm) {
 1431                                 SLIST_REMOVE_HEAD(&inmh,
 1432                                     in6m_nrele);
 1433                                 (void)mld_v1_transmit_report(inm,
 1434                                     MLD_LISTENER_REPORT);
 1435                         }
 1436                         break;
 1437                 case MLD_VERSION_2:
 1438                         mld_dispatch_queue(&qrq, 0);
 1439                         mld_dispatch_queue(&scq, 0);
 1440 
 1441                         /*
 1442                          * Free the in_multi reference(s) for
 1443                          * this lifecycle.
 1444                          */
 1445                         in6m_release_list_deferred(&inmh);
 1446                         break;
 1447                 }
 1448         }
 1449 
 1450 out_locked:
 1451         MLD_UNLOCK();
 1452         IN6_MULTI_LIST_UNLOCK();
 1453 }
 1454 
 1455 /*
 1456  * Update host report group timer.
 1457  * Will update the global pending timer flags.
 1458  */
 1459 static void
 1460 mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
 1461 {
 1462         int report_timer_expired;
 1463 
 1464         IN6_MULTI_LIST_LOCK_ASSERT();
 1465         MLD_LOCK_ASSERT();
 1466 
 1467         if (inm->in6m_timer == 0) {
 1468                 report_timer_expired = 0;
 1469         } else if (--inm->in6m_timer == 0) {
 1470                 report_timer_expired = 1;
 1471         } else {
 1472                 V_current_state_timers_running6 = 1;
 1473                 return;
 1474         }
 1475 
 1476         switch (inm->in6m_state) {
 1477         case MLD_NOT_MEMBER:
 1478         case MLD_SILENT_MEMBER:
 1479         case MLD_IDLE_MEMBER:
 1480         case MLD_LAZY_MEMBER:
 1481         case MLD_SLEEPING_MEMBER:
 1482         case MLD_AWAKENING_MEMBER:
 1483                 break;
 1484         case MLD_REPORTING_MEMBER:
 1485                 if (report_timer_expired) {
 1486                         inm->in6m_state = MLD_IDLE_MEMBER;
 1487                         in6m_disconnect(inm);
 1488                         in6m_rele_locked(inmh, inm);
 1489                 }
 1490                 break;
 1491         case MLD_G_QUERY_PENDING_MEMBER:
 1492         case MLD_SG_QUERY_PENDING_MEMBER:
 1493         case MLD_LEAVING_MEMBER:
 1494                 break;
 1495         }
 1496 }
 1497 
 1498 /*
 1499  * Update a group's timers for MLDv2.
 1500  * Will update the global pending timer flags.
 1501  * Note: Unlocked read from mli.
 1502  */
 1503 static void
 1504 mld_v2_process_group_timers(struct in6_multi_head *inmh,
 1505     struct mbufq *qrq, struct mbufq *scq,
 1506     struct in6_multi *inm, const int uri_fasthz)
 1507 {
 1508         int query_response_timer_expired;
 1509         int state_change_retransmit_timer_expired;
 1510 #ifdef KTR
 1511         char ip6tbuf[INET6_ADDRSTRLEN];
 1512 #endif
 1513 
 1514         IN6_MULTI_LIST_LOCK_ASSERT();
 1515         MLD_LOCK_ASSERT();
 1516 
 1517         query_response_timer_expired = 0;
 1518         state_change_retransmit_timer_expired = 0;
 1519 
 1520         /*
 1521          * During a transition from compatibility mode back to MLDv2,
 1522          * a group record in REPORTING state may still have its group
 1523          * timer active. This is a no-op in this function; it is easier
 1524          * to deal with it here than to complicate the slow-timeout path.
 1525          */
 1526         if (inm->in6m_timer == 0) {
 1527                 query_response_timer_expired = 0;
 1528         } else if (--inm->in6m_timer == 0) {
 1529                 query_response_timer_expired = 1;
 1530         } else {
 1531                 V_current_state_timers_running6 = 1;
 1532         }
 1533 
 1534         if (inm->in6m_sctimer == 0) {
 1535                 state_change_retransmit_timer_expired = 0;
 1536         } else if (--inm->in6m_sctimer == 0) {
 1537                 state_change_retransmit_timer_expired = 1;
 1538         } else {
 1539                 V_state_change_timers_running6 = 1;
 1540         }
 1541 
 1542         /* We are in fasttimo, so be quick about it. */
 1543         if (!state_change_retransmit_timer_expired &&
 1544             !query_response_timer_expired)
 1545                 return;
 1546 
 1547         switch (inm->in6m_state) {
 1548         case MLD_NOT_MEMBER:
 1549         case MLD_SILENT_MEMBER:
 1550         case MLD_SLEEPING_MEMBER:
 1551         case MLD_LAZY_MEMBER:
 1552         case MLD_AWAKENING_MEMBER:
 1553         case MLD_IDLE_MEMBER:
 1554                 break;
 1555         case MLD_G_QUERY_PENDING_MEMBER:
 1556         case MLD_SG_QUERY_PENDING_MEMBER:
 1557                 /*
 1558                  * Respond to a previously pending Group-Specific
 1559                  * or Group-and-Source-Specific query by enqueueing
 1560                  * the appropriate Current-State report for
 1561                  * immediate transmission.
 1562                  */
 1563                 if (query_response_timer_expired) {
 1564                         int retval;
 1565 
 1566                         retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
 1567                             (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
 1568                             0);
 1569                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 1570                             __func__, retval);
 1571                         inm->in6m_state = MLD_REPORTING_MEMBER;
 1572                         in6m_clear_recorded(inm);
 1573                 }
 1574                 /* FALLTHROUGH */
 1575         case MLD_REPORTING_MEMBER:
 1576         case MLD_LEAVING_MEMBER:
 1577                 if (state_change_retransmit_timer_expired) {
 1578                         /*
 1579                          * State-change retransmission timer fired.
 1580                          * If there are any further pending retransmissions,
 1581                          * set the global pending state-change flag, and
 1582                          * reset the timer.
 1583                          */
 1584                         if (--inm->in6m_scrv > 0) {
 1585                                 inm->in6m_sctimer = uri_fasthz;
 1586                                 V_state_change_timers_running6 = 1;
 1587                         }
 1588                         /*
 1589                          * Retransmit the previously computed state-change
 1590                          * report. If there are no further pending
 1591                          * retransmissions, the mbuf queue will be consumed.
 1592                          * Update T0 state to T1 as we have now sent
 1593                          * a state-change.
 1594                          */
 1595                         (void)mld_v2_merge_state_changes(inm, scq);
 1596 
 1597                         in6m_commit(inm);
 1598                         CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 1599                             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 1600                             if_name(inm->in6m_ifp));
 1601 
 1602                         /*
 1603                          * If we are leaving the group for good, make sure
 1604                          * we release MLD's reference to it.
 1605                          * This release must be deferred using a SLIST,
 1606                          * as we are called from a loop which traverses
 1607                          * the in_ifmultiaddr TAILQ.
 1608                          */
 1609                         if (inm->in6m_state == MLD_LEAVING_MEMBER &&
 1610                             inm->in6m_scrv == 0) {
 1611                                 inm->in6m_state = MLD_NOT_MEMBER;
 1612                                 in6m_disconnect(inm);
 1613                                 in6m_rele_locked(inmh, inm);
 1614                         }
 1615                 }
 1616                 break;
 1617         }
 1618 }
 1619 
 1620 /*
 1621  * Switch to a different version on the given interface,
 1622  * as per Section 9.12.
 1623  */
 1624 static void
 1625 mld_set_version(struct mld_ifsoftc *mli, const int version)
 1626 {
 1627         int old_version_timer;
 1628 
 1629         MLD_LOCK_ASSERT();
 1630 
 1631         CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
 1632             version, mli->mli_ifp, if_name(mli->mli_ifp));
 1633 
 1634         if (version == MLD_VERSION_1) {
 1635                 /*
 1636                  * Compute the "Older Version Querier Present" timer as per
 1637                  * Section 9.12.
 1638                  */
 1639                 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
 1640                 old_version_timer *= PR_SLOWHZ;
 1641                 mli->mli_v1_timer = old_version_timer;
 1642         }
 1643 
 1644         if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
 1645                 mli->mli_version = MLD_VERSION_1;
 1646                 mld_v2_cancel_link_timers(mli);
 1647         }
 1648 }
 1649 
 1650 /*
 1651  * Cancel pending MLDv2 timers for the given link and all groups
 1652  * joined on it; state-change, general-query, and group-query timers.
 1653  */
 1654 static void
 1655 mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
 1656 {
 1657         struct ifmultiaddr      *ifma, *next;
 1658         struct ifnet            *ifp;
 1659         struct in6_multi        *inm;
 1660         struct in6_multi_head inmh;
 1661 
 1662         CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
 1663             mli->mli_ifp, if_name(mli->mli_ifp));
 1664 
 1665         SLIST_INIT(&inmh);
 1666         IN6_MULTI_LIST_LOCK_ASSERT();
 1667         MLD_LOCK_ASSERT();
 1668 
 1669         /*
 1670          * Fast-track this potentially expensive operation
 1671          * by checking all the global 'timer pending' flags.
 1672          */
 1673         if (!V_interface_timers_running6 &&
 1674             !V_state_change_timers_running6 &&
 1675             !V_current_state_timers_running6)
 1676                 return;
 1677 
 1678         mli->mli_v2_timer = 0;
 1679 
 1680         ifp = mli->mli_ifp;
 1681 
 1682         IF_ADDR_WLOCK(ifp);
 1683  restart:
 1684         CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
 1685                 if (ifma->ifma_addr->sa_family != AF_INET6 ||
 1686                     ifma->ifma_protospec == NULL)
 1687                         continue;
 1688                 inm = (struct in6_multi *)ifma->ifma_protospec;
 1689                 switch (inm->in6m_state) {
 1690                 case MLD_NOT_MEMBER:
 1691                 case MLD_SILENT_MEMBER:
 1692                 case MLD_IDLE_MEMBER:
 1693                 case MLD_LAZY_MEMBER:
 1694                 case MLD_SLEEPING_MEMBER:
 1695                 case MLD_AWAKENING_MEMBER:
 1696                         break;
 1697                 case MLD_LEAVING_MEMBER:
 1698                         /*
 1699                          * If we are leaving the group and switching
 1700                          * version, we need to release the final
 1701                          * reference held for issuing the INCLUDE {}.
 1702                          */
 1703                         in6m_disconnect(inm);
 1704                         in6m_rele_locked(&inmh, inm);
 1705                         ifma->ifma_protospec = NULL;
 1706                         /* FALLTHROUGH */
 1707                 case MLD_G_QUERY_PENDING_MEMBER:
 1708                 case MLD_SG_QUERY_PENDING_MEMBER:
 1709                         in6m_clear_recorded(inm);
 1710                         /* FALLTHROUGH */
 1711                 case MLD_REPORTING_MEMBER:
 1712                         inm->in6m_sctimer = 0;
 1713                         inm->in6m_timer = 0;
 1714                         inm->in6m_state = MLD_REPORTING_MEMBER;
 1715                         /*
 1716                          * Free any pending MLDv2 state-change records.
 1717                          */
 1718                         mbufq_drain(&inm->in6m_scq);
 1719                         break;
 1720                 }
 1721                 if (__predict_false(ifma6_restart)) {
 1722                         ifma6_restart = false;
 1723                         goto restart;
 1724                 }
 1725         }
 1726         IF_ADDR_WUNLOCK(ifp);
 1727         in6m_release_list_deferred(&inmh);
 1728 }
 1729 
 1730 /*
 1731  * Global slowtimo handler.
 1732  * VIMAGE: Timeout handlers are expected to service all vimages.
 1733  */
 1734 void
 1735 mld_slowtimo(void)
 1736 {
 1737         VNET_ITERATOR_DECL(vnet_iter);
 1738 
 1739         VNET_LIST_RLOCK_NOSLEEP();
 1740         VNET_FOREACH(vnet_iter) {
 1741                 CURVNET_SET(vnet_iter);
 1742                 mld_slowtimo_vnet();
 1743                 CURVNET_RESTORE();
 1744         }
 1745         VNET_LIST_RUNLOCK_NOSLEEP();
 1746 }
 1747 
 1748 /*
 1749  * Per-vnet slowtimo handler.
 1750  */
 1751 static void
 1752 mld_slowtimo_vnet(void)
 1753 {
 1754         struct mld_ifsoftc *mli;
 1755 
 1756         MLD_LOCK();
 1757 
 1758         LIST_FOREACH(mli, &V_mli_head, mli_link) {
 1759                 mld_v1_process_querier_timers(mli);
 1760         }
 1761 
 1762         MLD_UNLOCK();
 1763 }
 1764 
 1765 /*
 1766  * Update the Older Version Querier Present timers for a link.
 1767  * See Section 9.12 of RFC 3810.
 1768  */
 1769 static void
 1770 mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
 1771 {
 1772 
 1773         MLD_LOCK_ASSERT();
 1774 
 1775         if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
 1776                 /*
 1777                  * MLDv1 Querier Present timer expired; revert to MLDv2.
 1778                  */
 1779                 CTR5(KTR_MLD,
 1780                     "%s: transition from v%d -> v%d on %p(%s)",
 1781                     __func__, mli->mli_version, MLD_VERSION_2,
 1782                     mli->mli_ifp, if_name(mli->mli_ifp));
 1783                 mli->mli_version = MLD_VERSION_2;
 1784         }
 1785 }
 1786 
 1787 /*
 1788  * Transmit an MLDv1 report immediately.
 1789  */
 1790 static int
 1791 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
 1792 {
 1793         struct ifnet            *ifp;
 1794         struct in6_ifaddr       *ia;
 1795         struct ip6_hdr          *ip6;
 1796         struct mbuf             *mh, *md;
 1797         struct mld_hdr          *mld;
 1798 
 1799         IN6_MULTI_LIST_LOCK_ASSERT();
 1800         MLD_LOCK_ASSERT();
 1801         
 1802         ifp = in6m->in6m_ifp;
 1803         /* in process of being freed */
 1804         if (ifp == NULL)
 1805                 return (0);
 1806         ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
 1807         /* ia may be NULL if link-local address is tentative. */
 1808 
 1809         mh = m_gethdr(M_NOWAIT, MT_DATA);
 1810         if (mh == NULL) {
 1811                 if (ia != NULL)
 1812                         ifa_free(&ia->ia_ifa);
 1813                 return (ENOMEM);
 1814         }
 1815         md = m_get(M_NOWAIT, MT_DATA);
 1816         if (md == NULL) {
 1817                 m_free(mh);
 1818                 if (ia != NULL)
 1819                         ifa_free(&ia->ia_ifa);
 1820                 return (ENOMEM);
 1821         }
 1822         mh->m_next = md;
 1823 
 1824         /*
 1825          * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
 1826          * that ether_output() does not need to allocate another mbuf
 1827          * for the header in the most common case.
 1828          */
 1829         M_ALIGN(mh, sizeof(struct ip6_hdr));
 1830         mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
 1831         mh->m_len = sizeof(struct ip6_hdr);
 1832 
 1833         ip6 = mtod(mh, struct ip6_hdr *);
 1834         ip6->ip6_flow = 0;
 1835         ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
 1836         ip6->ip6_vfc |= IPV6_VERSION;
 1837         ip6->ip6_nxt = IPPROTO_ICMPV6;
 1838         ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
 1839         ip6->ip6_dst = in6m->in6m_addr;
 1840 
 1841         md->m_len = sizeof(struct mld_hdr);
 1842         mld = mtod(md, struct mld_hdr *);
 1843         mld->mld_type = type;
 1844         mld->mld_code = 0;
 1845         mld->mld_cksum = 0;
 1846         mld->mld_maxdelay = 0;
 1847         mld->mld_reserved = 0;
 1848         mld->mld_addr = in6m->in6m_addr;
 1849         in6_clearscope(&mld->mld_addr);
 1850         mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
 1851             sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
 1852 
 1853         mld_save_context(mh, ifp);
 1854         mh->m_flags |= M_MLDV1;
 1855 
 1856         mld_dispatch_packet(mh);
 1857 
 1858         if (ia != NULL)
 1859                 ifa_free(&ia->ia_ifa);
 1860         return (0);
 1861 }
 1862 
 1863 /*
 1864  * Process a state change from the upper layer for the given IPv6 group.
 1865  *
 1866  * Each socket holds a reference on the in_multi in its own ip_moptions.
 1867  * The socket layer will have made the necessary updates to.the group
 1868  * state, it is now up to MLD to issue a state change report if there
 1869  * has been any change between T0 (when the last state-change was issued)
 1870  * and T1 (now).
 1871  *
 1872  * We use the MLDv2 state machine at group level. The MLd module
 1873  * however makes the decision as to which MLD protocol version to speak.
 1874  * A state change *from* INCLUDE {} always means an initial join.
 1875  * A state change *to* INCLUDE {} always means a final leave.
 1876  *
 1877  * If delay is non-zero, and the state change is an initial multicast
 1878  * join, the state change report will be delayed by 'delay' ticks
 1879  * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
 1880  * the initial MLDv2 state change report will be delayed by whichever
 1881  * is sooner, a pending state-change timer or delay itself.
 1882  *
 1883  * VIMAGE: curvnet should have been set by caller, as this routine
 1884  * is called from the socket option handlers.
 1885  */
 1886 int
 1887 mld_change_state(struct in6_multi *inm, const int delay)
 1888 {
 1889         struct mld_ifsoftc *mli;
 1890         struct ifnet *ifp;
 1891         int error;
 1892 
 1893         IN6_MULTI_LIST_LOCK_ASSERT();
 1894 
 1895         error = 0;
 1896 
 1897         /*
 1898          * Try to detect if the upper layer just asked us to change state
 1899          * for an interface which has now gone away.
 1900          */
 1901         KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
 1902         ifp = inm->in6m_ifma->ifma_ifp;
 1903         if (ifp == NULL)
 1904                 return (0);
 1905         /*
 1906          * Sanity check that netinet6's notion of ifp is the
 1907          * same as net's.
 1908          */
 1909         KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
 1910 
 1911         MLD_LOCK();
 1912         mli = MLD_IFINFO(ifp);
 1913         KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
 1914 
 1915         /*
 1916          * If we detect a state transition to or from MCAST_UNDEFINED
 1917          * for this group, then we are starting or finishing an MLD
 1918          * life cycle for this group.
 1919          */
 1920         if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
 1921                 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
 1922                     inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
 1923                 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
 1924                         CTR1(KTR_MLD, "%s: initial join", __func__);
 1925                         error = mld_initial_join(inm, mli, delay);
 1926                         goto out_locked;
 1927                 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
 1928                         CTR1(KTR_MLD, "%s: final leave", __func__);
 1929                         mld_final_leave(inm, mli);
 1930                         goto out_locked;
 1931                 }
 1932         } else {
 1933                 CTR1(KTR_MLD, "%s: filter set change", __func__);
 1934         }
 1935 
 1936         error = mld_handle_state_change(inm, mli);
 1937 
 1938 out_locked:
 1939         MLD_UNLOCK();
 1940         return (error);
 1941 }
 1942 
 1943 /*
 1944  * Perform the initial join for an MLD group.
 1945  *
 1946  * When joining a group:
 1947  *  If the group should have its MLD traffic suppressed, do nothing.
 1948  *  MLDv1 starts sending MLDv1 host membership reports.
 1949  *  MLDv2 will schedule an MLDv2 state-change report containing the
 1950  *  initial state of the membership.
 1951  *
 1952  * If the delay argument is non-zero, then we must delay sending the
 1953  * initial state change for delay ticks (in units of PR_FASTHZ).
 1954  */
 1955 static int
 1956 mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
 1957     const int delay)
 1958 {
 1959         struct ifnet            *ifp;
 1960         struct mbufq            *mq;
 1961         int                      error, retval, syncstates;
 1962         int                      odelay;
 1963 #ifdef KTR
 1964         char                     ip6tbuf[INET6_ADDRSTRLEN];
 1965 #endif
 1966 
 1967         CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
 1968             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 1969             inm->in6m_ifp, if_name(inm->in6m_ifp));
 1970 
 1971         error = 0;
 1972         syncstates = 1;
 1973 
 1974         ifp = inm->in6m_ifp;
 1975 
 1976         IN6_MULTI_LIST_LOCK_ASSERT();
 1977         MLD_LOCK_ASSERT();
 1978 
 1979         KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
 1980 
 1981         /*
 1982          * Groups joined on loopback or marked as 'not reported',
 1983          * enter the MLD_SILENT_MEMBER state and
 1984          * are never reported in any protocol exchanges.
 1985          * All other groups enter the appropriate state machine
 1986          * for the version in use on this link.
 1987          * A link marked as MLIF_SILENT causes MLD to be completely
 1988          * disabled for the link.
 1989          */
 1990         if ((ifp->if_flags & IFF_LOOPBACK) ||
 1991             (mli->mli_flags & MLIF_SILENT) ||
 1992             !mld_is_addr_reported(&inm->in6m_addr)) {
 1993                 CTR1(KTR_MLD,
 1994 "%s: not kicking state machine for silent group", __func__);
 1995                 inm->in6m_state = MLD_SILENT_MEMBER;
 1996                 inm->in6m_timer = 0;
 1997         } else {
 1998                 /*
 1999                  * Deal with overlapping in_multi lifecycle.
 2000                  * If this group was LEAVING, then make sure
 2001                  * we drop the reference we picked up to keep the
 2002                  * group around for the final INCLUDE {} enqueue.
 2003                  */
 2004                 if (mli->mli_version == MLD_VERSION_2 &&
 2005                     inm->in6m_state == MLD_LEAVING_MEMBER) {
 2006                         inm->in6m_refcount--;
 2007                 }
 2008                 inm->in6m_state = MLD_REPORTING_MEMBER;
 2009 
 2010                 switch (mli->mli_version) {
 2011                 case MLD_VERSION_1:
 2012                         /*
 2013                          * If a delay was provided, only use it if
 2014                          * it is greater than the delay normally
 2015                          * used for an MLDv1 state change report,
 2016                          * and delay sending the initial MLDv1 report
 2017                          * by not transitioning to the IDLE state.
 2018                          */
 2019                         odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
 2020                         if (delay) {
 2021                                 inm->in6m_timer = max(delay, odelay);
 2022                                 V_current_state_timers_running6 = 1;
 2023                         } else {
 2024                                 inm->in6m_state = MLD_IDLE_MEMBER;
 2025                                 error = mld_v1_transmit_report(inm,
 2026                                      MLD_LISTENER_REPORT);
 2027                                 if (error == 0) {
 2028                                         inm->in6m_timer = odelay;
 2029                                         V_current_state_timers_running6 = 1;
 2030                                 }
 2031                         }
 2032                         break;
 2033 
 2034                 case MLD_VERSION_2:
 2035                         /*
 2036                          * Defer update of T0 to T1, until the first copy
 2037                          * of the state change has been transmitted.
 2038                          */
 2039                         syncstates = 0;
 2040 
 2041                         /*
 2042                          * Immediately enqueue a State-Change Report for
 2043                          * this interface, freeing any previous reports.
 2044                          * Don't kick the timers if there is nothing to do,
 2045                          * or if an error occurred.
 2046                          */
 2047                         mq = &inm->in6m_scq;
 2048                         mbufq_drain(mq);
 2049                         retval = mld_v2_enqueue_group_record(mq, inm, 1,
 2050                             0, 0, (mli->mli_flags & MLIF_USEALLOW));
 2051                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 2052                             __func__, retval);
 2053                         if (retval <= 0) {
 2054                                 error = retval * -1;
 2055                                 break;
 2056                         }
 2057 
 2058                         /*
 2059                          * Schedule transmission of pending state-change
 2060                          * report up to RV times for this link. The timer
 2061                          * will fire at the next mld_fasttimo (~200ms),
 2062                          * giving us an opportunity to merge the reports.
 2063                          *
 2064                          * If a delay was provided to this function, only
 2065                          * use this delay if sooner than the existing one.
 2066                          */
 2067                         KASSERT(mli->mli_rv > 1,
 2068                            ("%s: invalid robustness %d", __func__,
 2069                             mli->mli_rv));
 2070                         inm->in6m_scrv = mli->mli_rv;
 2071                         if (delay) {
 2072                                 if (inm->in6m_sctimer > 1) {
 2073                                         inm->in6m_sctimer =
 2074                                             min(inm->in6m_sctimer, delay);
 2075                                 } else
 2076                                         inm->in6m_sctimer = delay;
 2077                         } else
 2078                                 inm->in6m_sctimer = 1;
 2079                         V_state_change_timers_running6 = 1;
 2080 
 2081                         error = 0;
 2082                         break;
 2083                 }
 2084         }
 2085 
 2086         /*
 2087          * Only update the T0 state if state change is atomic,
 2088          * i.e. we don't need to wait for a timer to fire before we
 2089          * can consider the state change to have been communicated.
 2090          */
 2091         if (syncstates) {
 2092                 in6m_commit(inm);
 2093                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2094                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2095                     if_name(inm->in6m_ifp));
 2096         }
 2097 
 2098         return (error);
 2099 }
 2100 
 2101 /*
 2102  * Issue an intermediate state change during the life-cycle.
 2103  */
 2104 static int
 2105 mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
 2106 {
 2107         struct ifnet            *ifp;
 2108         int                      retval;
 2109 #ifdef KTR
 2110         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2111 #endif
 2112 
 2113         CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
 2114             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2115             inm->in6m_ifp, if_name(inm->in6m_ifp));
 2116 
 2117         ifp = inm->in6m_ifp;
 2118 
 2119         IN6_MULTI_LIST_LOCK_ASSERT();
 2120         MLD_LOCK_ASSERT();
 2121 
 2122         KASSERT(mli && mli->mli_ifp == ifp,
 2123             ("%s: inconsistent ifp", __func__));
 2124 
 2125         if ((ifp->if_flags & IFF_LOOPBACK) ||
 2126             (mli->mli_flags & MLIF_SILENT) ||
 2127             !mld_is_addr_reported(&inm->in6m_addr) ||
 2128             (mli->mli_version != MLD_VERSION_2)) {
 2129                 if (!mld_is_addr_reported(&inm->in6m_addr)) {
 2130                         CTR1(KTR_MLD,
 2131 "%s: not kicking state machine for silent group", __func__);
 2132                 }
 2133                 CTR1(KTR_MLD, "%s: nothing to do", __func__);
 2134                 in6m_commit(inm);
 2135                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2136                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2137                     if_name(inm->in6m_ifp));
 2138                 return (0);
 2139         }
 2140 
 2141         mbufq_drain(&inm->in6m_scq);
 2142 
 2143         retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
 2144             (mli->mli_flags & MLIF_USEALLOW));
 2145         CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
 2146         if (retval <= 0)
 2147                 return (-retval);
 2148 
 2149         /*
 2150          * If record(s) were enqueued, start the state-change
 2151          * report timer for this group.
 2152          */
 2153         inm->in6m_scrv = mli->mli_rv;
 2154         inm->in6m_sctimer = 1;
 2155         V_state_change_timers_running6 = 1;
 2156 
 2157         return (0);
 2158 }
 2159 
 2160 /*
 2161  * Perform the final leave for a multicast address.
 2162  *
 2163  * When leaving a group:
 2164  *  MLDv1 sends a DONE message, if and only if we are the reporter.
 2165  *  MLDv2 enqueues a state-change report containing a transition
 2166  *  to INCLUDE {} for immediate transmission.
 2167  */
 2168 static void
 2169 mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
 2170 {
 2171         int syncstates;
 2172 #ifdef KTR
 2173         char ip6tbuf[INET6_ADDRSTRLEN];
 2174 #endif
 2175 
 2176         syncstates = 1;
 2177 
 2178         CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
 2179             __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2180             inm->in6m_ifp, if_name(inm->in6m_ifp));
 2181 
 2182         IN6_MULTI_LIST_LOCK_ASSERT();
 2183         MLD_LOCK_ASSERT();
 2184 
 2185         switch (inm->in6m_state) {
 2186         case MLD_NOT_MEMBER:
 2187         case MLD_SILENT_MEMBER:
 2188         case MLD_LEAVING_MEMBER:
 2189                 /* Already leaving or left; do nothing. */
 2190                 CTR1(KTR_MLD,
 2191 "%s: not kicking state machine for silent group", __func__);
 2192                 break;
 2193         case MLD_REPORTING_MEMBER:
 2194         case MLD_IDLE_MEMBER:
 2195         case MLD_G_QUERY_PENDING_MEMBER:
 2196         case MLD_SG_QUERY_PENDING_MEMBER:
 2197                 if (mli->mli_version == MLD_VERSION_1) {
 2198 #ifdef INVARIANTS
 2199                         if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
 2200                             inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
 2201                         panic("%s: MLDv2 state reached, not MLDv2 mode",
 2202                              __func__);
 2203 #endif
 2204                         mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
 2205                         inm->in6m_state = MLD_NOT_MEMBER;
 2206                         V_current_state_timers_running6 = 1;
 2207                 } else if (mli->mli_version == MLD_VERSION_2) {
 2208                         /*
 2209                          * Stop group timer and all pending reports.
 2210                          * Immediately enqueue a state-change report
 2211                          * TO_IN {} to be sent on the next fast timeout,
 2212                          * giving us an opportunity to merge reports.
 2213                          */
 2214                         mbufq_drain(&inm->in6m_scq);
 2215                         inm->in6m_timer = 0;
 2216                         inm->in6m_scrv = mli->mli_rv;
 2217                         CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
 2218                             "pending retransmissions.", __func__,
 2219                             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2220                             if_name(inm->in6m_ifp), inm->in6m_scrv);
 2221                         if (inm->in6m_scrv == 0) {
 2222                                 inm->in6m_state = MLD_NOT_MEMBER;
 2223                                 inm->in6m_sctimer = 0;
 2224                         } else {
 2225                                 int retval;
 2226 
 2227                                 in6m_acquire_locked(inm);
 2228 
 2229                                 retval = mld_v2_enqueue_group_record(
 2230                                     &inm->in6m_scq, inm, 1, 0, 0,
 2231                                     (mli->mli_flags & MLIF_USEALLOW));
 2232                                 KASSERT(retval != 0,
 2233                                     ("%s: enqueue record = %d", __func__,
 2234                                      retval));
 2235 
 2236                                 inm->in6m_state = MLD_LEAVING_MEMBER;
 2237                                 inm->in6m_sctimer = 1;
 2238                                 V_state_change_timers_running6 = 1;
 2239                                 syncstates = 0;
 2240                         }
 2241                         break;
 2242                 }
 2243                 break;
 2244         case MLD_LAZY_MEMBER:
 2245         case MLD_SLEEPING_MEMBER:
 2246         case MLD_AWAKENING_MEMBER:
 2247                 /* Our reports are suppressed; do nothing. */
 2248                 break;
 2249         }
 2250 
 2251         if (syncstates) {
 2252                 in6m_commit(inm);
 2253                 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
 2254                     ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2255                     if_name(inm->in6m_ifp));
 2256                 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
 2257                 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
 2258                     __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
 2259         }
 2260 }
 2261 
 2262 /*
 2263  * Enqueue an MLDv2 group record to the given output queue.
 2264  *
 2265  * If is_state_change is zero, a current-state record is appended.
 2266  * If is_state_change is non-zero, a state-change report is appended.
 2267  *
 2268  * If is_group_query is non-zero, an mbuf packet chain is allocated.
 2269  * If is_group_query is zero, and if there is a packet with free space
 2270  * at the tail of the queue, it will be appended to providing there
 2271  * is enough free space.
 2272  * Otherwise a new mbuf packet chain is allocated.
 2273  *
 2274  * If is_source_query is non-zero, each source is checked to see if
 2275  * it was recorded for a Group-Source query, and will be omitted if
 2276  * it is not both in-mode and recorded.
 2277  *
 2278  * If use_block_allow is non-zero, state change reports for initial join
 2279  * and final leave, on an inclusive mode group with a source list, will be
 2280  * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 2281  *
 2282  * The function will attempt to allocate leading space in the packet
 2283  * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 2284  *
 2285  * If successful the size of all data appended to the queue is returned,
 2286  * otherwise an error code less than zero is returned, or zero if
 2287  * no record(s) were appended.
 2288  */
 2289 static int
 2290 mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
 2291     const int is_state_change, const int is_group_query,
 2292     const int is_source_query, const int use_block_allow)
 2293 {
 2294         struct mldv2_record      mr;
 2295         struct mldv2_record     *pmr;
 2296         struct ifnet            *ifp;
 2297         struct ip6_msource      *ims, *nims;
 2298         struct mbuf             *m0, *m, *md;
 2299         int                      is_filter_list_change;
 2300         int                      minrec0len, m0srcs, msrcs, nbytes, off;
 2301         int                      record_has_sources;
 2302         int                      now;
 2303         int                      type;
 2304         uint8_t                  mode;
 2305 #ifdef KTR
 2306         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2307 #endif
 2308 
 2309         IN6_MULTI_LIST_LOCK_ASSERT();
 2310 
 2311         ifp = inm->in6m_ifp;
 2312         is_filter_list_change = 0;
 2313         m = NULL;
 2314         m0 = NULL;
 2315         m0srcs = 0;
 2316         msrcs = 0;
 2317         nbytes = 0;
 2318         nims = NULL;
 2319         record_has_sources = 1;
 2320         pmr = NULL;
 2321         type = MLD_DO_NOTHING;
 2322         mode = inm->in6m_st[1].iss_fmode;
 2323 
 2324         /*
 2325          * If we did not transition out of ASM mode during t0->t1,
 2326          * and there are no source nodes to process, we can skip
 2327          * the generation of source records.
 2328          */
 2329         if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
 2330             inm->in6m_nsrc == 0)
 2331                 record_has_sources = 0;
 2332 
 2333         if (is_state_change) {
 2334                 /*
 2335                  * Queue a state change record.
 2336                  * If the mode did not change, and there are non-ASM
 2337                  * listeners or source filters present,
 2338                  * we potentially need to issue two records for the group.
 2339                  * If there are ASM listeners, and there was no filter
 2340                  * mode transition of any kind, do nothing.
 2341                  *
 2342                  * If we are transitioning to MCAST_UNDEFINED, we need
 2343                  * not send any sources. A transition to/from this state is
 2344                  * considered inclusive with some special treatment.
 2345                  *
 2346                  * If we are rewriting initial joins/leaves to use
 2347                  * ALLOW/BLOCK, and the group's membership is inclusive,
 2348                  * we need to send sources in all cases.
 2349                  */
 2350                 if (mode != inm->in6m_st[0].iss_fmode) {
 2351                         if (mode == MCAST_EXCLUDE) {
 2352                                 CTR1(KTR_MLD, "%s: change to EXCLUDE",
 2353                                     __func__);
 2354                                 type = MLD_CHANGE_TO_EXCLUDE_MODE;
 2355                         } else {
 2356                                 CTR1(KTR_MLD, "%s: change to INCLUDE",
 2357                                     __func__);
 2358                                 if (use_block_allow) {
 2359                                         /*
 2360                                          * XXX
 2361                                          * Here we're interested in state
 2362                                          * edges either direction between
 2363                                          * MCAST_UNDEFINED and MCAST_INCLUDE.
 2364                                          * Perhaps we should just check
 2365                                          * the group state, rather than
 2366                                          * the filter mode.
 2367                                          */
 2368                                         if (mode == MCAST_UNDEFINED) {
 2369                                                 type = MLD_BLOCK_OLD_SOURCES;
 2370                                         } else {
 2371                                                 type = MLD_ALLOW_NEW_SOURCES;
 2372                                         }
 2373                                 } else {
 2374                                         type = MLD_CHANGE_TO_INCLUDE_MODE;
 2375                                         if (mode == MCAST_UNDEFINED)
 2376                                                 record_has_sources = 0;
 2377                                 }
 2378                         }
 2379                 } else {
 2380                         if (record_has_sources) {
 2381                                 is_filter_list_change = 1;
 2382                         } else {
 2383                                 type = MLD_DO_NOTHING;
 2384                         }
 2385                 }
 2386         } else {
 2387                 /*
 2388                  * Queue a current state record.
 2389                  */
 2390                 if (mode == MCAST_EXCLUDE) {
 2391                         type = MLD_MODE_IS_EXCLUDE;
 2392                 } else if (mode == MCAST_INCLUDE) {
 2393                         type = MLD_MODE_IS_INCLUDE;
 2394                         KASSERT(inm->in6m_st[1].iss_asm == 0,
 2395                             ("%s: inm %p is INCLUDE but ASM count is %d",
 2396                              __func__, inm, inm->in6m_st[1].iss_asm));
 2397                 }
 2398         }
 2399 
 2400         /*
 2401          * Generate the filter list changes using a separate function.
 2402          */
 2403         if (is_filter_list_change)
 2404                 return (mld_v2_enqueue_filter_change(mq, inm));
 2405 
 2406         if (type == MLD_DO_NOTHING) {
 2407                 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
 2408                     __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2409                     if_name(inm->in6m_ifp));
 2410                 return (0);
 2411         }
 2412 
 2413         /*
 2414          * If any sources are present, we must be able to fit at least
 2415          * one in the trailing space of the tail packet's mbuf,
 2416          * ideally more.
 2417          */
 2418         minrec0len = sizeof(struct mldv2_record);
 2419         if (record_has_sources)
 2420                 minrec0len += sizeof(struct in6_addr);
 2421 
 2422         CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
 2423             mld_rec_type_to_str(type),
 2424             ip6_sprintf(ip6tbuf, &inm->in6m_addr),
 2425             if_name(inm->in6m_ifp));
 2426 
 2427         /*
 2428          * Check if we have a packet in the tail of the queue for this
 2429          * group into which the first group record for this group will fit.
 2430          * Otherwise allocate a new packet.
 2431          * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
 2432          * Note: Group records for G/GSR query responses MUST be sent
 2433          * in their own packet.
 2434          */
 2435         m0 = mbufq_last(mq);
 2436         if (!is_group_query &&
 2437             m0 != NULL &&
 2438             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
 2439             (m0->m_pkthdr.len + minrec0len) <
 2440              (ifp->if_mtu - MLD_MTUSPACE)) {
 2441                 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
 2442                             sizeof(struct mldv2_record)) /
 2443                             sizeof(struct in6_addr);
 2444                 m = m0;
 2445                 CTR1(KTR_MLD, "%s: use existing packet", __func__);
 2446         } else {
 2447                 if (mbufq_full(mq)) {
 2448                         CTR1(KTR_MLD, "%s: outbound queue full", __func__);
 2449                         return (-ENOMEM);
 2450                 }
 2451                 m = NULL;
 2452                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2453                     sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
 2454                 if (!is_state_change && !is_group_query)
 2455                         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 2456                 if (m == NULL)
 2457                         m = m_gethdr(M_NOWAIT, MT_DATA);
 2458                 if (m == NULL)
 2459                         return (-ENOMEM);
 2460 
 2461                 mld_save_context(m, ifp);
 2462 
 2463                 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
 2464         }
 2465 
 2466         /*
 2467          * Append group record.
 2468          * If we have sources, we don't know how many yet.
 2469          */
 2470         mr.mr_type = type;
 2471         mr.mr_datalen = 0;
 2472         mr.mr_numsrc = 0;
 2473         mr.mr_addr = inm->in6m_addr;
 2474         in6_clearscope(&mr.mr_addr);
 2475         if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
 2476                 if (m != m0)
 2477                         m_freem(m);
 2478                 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
 2479                 return (-ENOMEM);
 2480         }
 2481         nbytes += sizeof(struct mldv2_record);
 2482 
 2483         /*
 2484          * Append as many sources as will fit in the first packet.
 2485          * If we are appending to a new packet, the chain allocation
 2486          * may potentially use clusters; use m_getptr() in this case.
 2487          * If we are appending to an existing packet, we need to obtain
 2488          * a pointer to the group record after m_append(), in case a new
 2489          * mbuf was allocated.
 2490          *
 2491          * Only append sources which are in-mode at t1. If we are
 2492          * transitioning to MCAST_UNDEFINED state on the group, and
 2493          * use_block_allow is zero, do not include source entries.
 2494          * Otherwise, we need to include this source in the report.
 2495          *
 2496          * Only report recorded sources in our filter set when responding
 2497          * to a group-source query.
 2498          */
 2499         if (record_has_sources) {
 2500                 if (m == m0) {
 2501                         md = m_last(m);
 2502                         pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
 2503                             md->m_len - nbytes);
 2504                 } else {
 2505                         md = m_getptr(m, 0, &off);
 2506                         pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
 2507                             off);
 2508                 }
 2509                 msrcs = 0;
 2510                 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
 2511                     nims) {
 2512                         CTR2(KTR_MLD, "%s: visit node %s", __func__,
 2513                             ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2514                         now = im6s_get_mode(inm, ims, 1);
 2515                         CTR2(KTR_MLD, "%s: node is %d", __func__, now);
 2516                         if ((now != mode) ||
 2517                             (now == mode &&
 2518                              (!use_block_allow && mode == MCAST_UNDEFINED))) {
 2519                                 CTR1(KTR_MLD, "%s: skip node", __func__);
 2520                                 continue;
 2521                         }
 2522                         if (is_source_query && ims->im6s_stp == 0) {
 2523                                 CTR1(KTR_MLD, "%s: skip unrecorded node",
 2524                                     __func__);
 2525                                 continue;
 2526                         }
 2527                         CTR1(KTR_MLD, "%s: append node", __func__);
 2528                         if (!m_append(m, sizeof(struct in6_addr),
 2529                             (void *)&ims->im6s_addr)) {
 2530                                 if (m != m0)
 2531                                         m_freem(m);
 2532                                 CTR1(KTR_MLD, "%s: m_append() failed.",
 2533                                     __func__);
 2534                                 return (-ENOMEM);
 2535                         }
 2536                         nbytes += sizeof(struct in6_addr);
 2537                         ++msrcs;
 2538                         if (msrcs == m0srcs)
 2539                                 break;
 2540                 }
 2541                 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
 2542                     msrcs);
 2543                 pmr->mr_numsrc = htons(msrcs);
 2544                 nbytes += (msrcs * sizeof(struct in6_addr));
 2545         }
 2546 
 2547         if (is_source_query && msrcs == 0) {
 2548                 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
 2549                 if (m != m0)
 2550                         m_freem(m);
 2551                 return (0);
 2552         }
 2553 
 2554         /*
 2555          * We are good to go with first packet.
 2556          */
 2557         if (m != m0) {
 2558                 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
 2559                 m->m_pkthdr.PH_vt.vt_nrecs = 1;
 2560                 mbufq_enqueue(mq, m);
 2561         } else
 2562                 m->m_pkthdr.PH_vt.vt_nrecs++;
 2563 
 2564         /*
 2565          * No further work needed if no source list in packet(s).
 2566          */
 2567         if (!record_has_sources)
 2568                 return (nbytes);
 2569 
 2570         /*
 2571          * Whilst sources remain to be announced, we need to allocate
 2572          * a new packet and fill out as many sources as will fit.
 2573          * Always try for a cluster first.
 2574          */
 2575         while (nims != NULL) {
 2576                 if (mbufq_full(mq)) {
 2577                         CTR1(KTR_MLD, "%s: outbound queue full", __func__);
 2578                         return (-ENOMEM);
 2579                 }
 2580                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 2581                 if (m == NULL)
 2582                         m = m_gethdr(M_NOWAIT, MT_DATA);
 2583                 if (m == NULL)
 2584                         return (-ENOMEM);
 2585                 mld_save_context(m, ifp);
 2586                 md = m_getptr(m, 0, &off);
 2587                 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
 2588                 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
 2589 
 2590                 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
 2591                         if (m != m0)
 2592                                 m_freem(m);
 2593                         CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
 2594                         return (-ENOMEM);
 2595                 }
 2596                 m->m_pkthdr.PH_vt.vt_nrecs = 1;
 2597                 nbytes += sizeof(struct mldv2_record);
 2598 
 2599                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2600                     sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
 2601 
 2602                 msrcs = 0;
 2603                 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
 2604                         CTR2(KTR_MLD, "%s: visit node %s",
 2605                             __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2606                         now = im6s_get_mode(inm, ims, 1);
 2607                         if ((now != mode) ||
 2608                             (now == mode &&
 2609                              (!use_block_allow && mode == MCAST_UNDEFINED))) {
 2610                                 CTR1(KTR_MLD, "%s: skip node", __func__);
 2611                                 continue;
 2612                         }
 2613                         if (is_source_query && ims->im6s_stp == 0) {
 2614                                 CTR1(KTR_MLD, "%s: skip unrecorded node",
 2615                                     __func__);
 2616                                 continue;
 2617                         }
 2618                         CTR1(KTR_MLD, "%s: append node", __func__);
 2619                         if (!m_append(m, sizeof(struct in6_addr),
 2620                             (void *)&ims->im6s_addr)) {
 2621                                 if (m != m0)
 2622                                         m_freem(m);
 2623                                 CTR1(KTR_MLD, "%s: m_append() failed.",
 2624                                     __func__);
 2625                                 return (-ENOMEM);
 2626                         }
 2627                         ++msrcs;
 2628                         if (msrcs == m0srcs)
 2629                                 break;
 2630                 }
 2631                 pmr->mr_numsrc = htons(msrcs);
 2632                 nbytes += (msrcs * sizeof(struct in6_addr));
 2633 
 2634                 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
 2635                 mbufq_enqueue(mq, m);
 2636         }
 2637 
 2638         return (nbytes);
 2639 }
 2640 
 2641 /*
 2642  * Type used to mark record pass completion.
 2643  * We exploit the fact we can cast to this easily from the
 2644  * current filter modes on each ip_msource node.
 2645  */
 2646 typedef enum {
 2647         REC_NONE = 0x00,        /* MCAST_UNDEFINED */
 2648         REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
 2649         REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
 2650         REC_FULL = REC_ALLOW | REC_BLOCK
 2651 } rectype_t;
 2652 
 2653 /*
 2654  * Enqueue an MLDv2 filter list change to the given output queue.
 2655  *
 2656  * Source list filter state is held in an RB-tree. When the filter list
 2657  * for a group is changed without changing its mode, we need to compute
 2658  * the deltas between T0 and T1 for each source in the filter set,
 2659  * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 2660  *
 2661  * As we may potentially queue two record types, and the entire R-B tree
 2662  * needs to be walked at once, we break this out into its own function
 2663  * so we can generate a tightly packed queue of packets.
 2664  *
 2665  * XXX This could be written to only use one tree walk, although that makes
 2666  * serializing into the mbuf chains a bit harder. For now we do two walks
 2667  * which makes things easier on us, and it may or may not be harder on
 2668  * the L2 cache.
 2669  *
 2670  * If successful the size of all data appended to the queue is returned,
 2671  * otherwise an error code less than zero is returned, or zero if
 2672  * no record(s) were appended.
 2673  */
 2674 static int
 2675 mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
 2676 {
 2677         static const int MINRECLEN =
 2678             sizeof(struct mldv2_record) + sizeof(struct in6_addr);
 2679         struct ifnet            *ifp;
 2680         struct mldv2_record      mr;
 2681         struct mldv2_record     *pmr;
 2682         struct ip6_msource      *ims, *nims;
 2683         struct mbuf             *m, *m0, *md;
 2684         int                      m0srcs, nbytes, npbytes, off, rsrcs, schanged;
 2685         int                      nallow, nblock;
 2686         uint8_t                  mode, now, then;
 2687         rectype_t                crt, drt, nrt;
 2688 #ifdef KTR
 2689         char                     ip6tbuf[INET6_ADDRSTRLEN];
 2690 #endif
 2691 
 2692         IN6_MULTI_LIST_LOCK_ASSERT();
 2693 
 2694         if (inm->in6m_nsrc == 0 ||
 2695             (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
 2696                 return (0);
 2697 
 2698         ifp = inm->in6m_ifp;                    /* interface */
 2699         mode = inm->in6m_st[1].iss_fmode;       /* filter mode at t1 */
 2700         crt = REC_NONE; /* current group record type */
 2701         drt = REC_NONE; /* mask of completed group record types */
 2702         nrt = REC_NONE; /* record type for current node */
 2703         m0srcs = 0;     /* # source which will fit in current mbuf chain */
 2704         npbytes = 0;    /* # of bytes appended this packet */
 2705         nbytes = 0;     /* # of bytes appended to group's state-change queue */
 2706         rsrcs = 0;      /* # sources encoded in current record */
 2707         schanged = 0;   /* # nodes encoded in overall filter change */
 2708         nallow = 0;     /* # of source entries in ALLOW_NEW */
 2709         nblock = 0;     /* # of source entries in BLOCK_OLD */
 2710         nims = NULL;    /* next tree node pointer */
 2711 
 2712         /*
 2713          * For each possible filter record mode.
 2714          * The first kind of source we encounter tells us which
 2715          * is the first kind of record we start appending.
 2716          * If a node transitioned to UNDEFINED at t1, its mode is treated
 2717          * as the inverse of the group's filter mode.
 2718          */
 2719         while (drt != REC_FULL) {
 2720                 do {
 2721                         m0 = mbufq_last(mq);
 2722                         if (m0 != NULL &&
 2723                             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
 2724                              MLD_V2_REPORT_MAXRECS) &&
 2725                             (m0->m_pkthdr.len + MINRECLEN) <
 2726                              (ifp->if_mtu - MLD_MTUSPACE)) {
 2727                                 m = m0;
 2728                                 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
 2729                                             sizeof(struct mldv2_record)) /
 2730                                             sizeof(struct in6_addr);
 2731                                 CTR1(KTR_MLD,
 2732                                     "%s: use previous packet", __func__);
 2733                         } else {
 2734                                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 2735                                 if (m == NULL)
 2736                                         m = m_gethdr(M_NOWAIT, MT_DATA);
 2737                                 if (m == NULL) {
 2738                                         CTR1(KTR_MLD,
 2739                                             "%s: m_get*() failed", __func__);
 2740                                         return (-ENOMEM);
 2741                                 }
 2742                                 m->m_pkthdr.PH_vt.vt_nrecs = 0;
 2743                                 mld_save_context(m, ifp);
 2744                                 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
 2745                                     sizeof(struct mldv2_record)) /
 2746                                     sizeof(struct in6_addr);
 2747                                 npbytes = 0;
 2748                                 CTR1(KTR_MLD,
 2749                                     "%s: allocated new packet", __func__);
 2750                         }
 2751                         /*
 2752                          * Append the MLD group record header to the
 2753                          * current packet's data area.
 2754                          * Recalculate pointer to free space for next
 2755                          * group record, in case m_append() allocated
 2756                          * a new mbuf or cluster.
 2757                          */
 2758                         memset(&mr, 0, sizeof(mr));
 2759                         mr.mr_addr = inm->in6m_addr;
 2760                         in6_clearscope(&mr.mr_addr);
 2761                         if (!m_append(m, sizeof(mr), (void *)&mr)) {
 2762                                 if (m != m0)
 2763                                         m_freem(m);
 2764                                 CTR1(KTR_MLD,
 2765                                     "%s: m_append() failed", __func__);
 2766                                 return (-ENOMEM);
 2767                         }
 2768                         npbytes += sizeof(struct mldv2_record);
 2769                         if (m != m0) {
 2770                                 /* new packet; offset in chain */
 2771                                 md = m_getptr(m, npbytes -
 2772                                     sizeof(struct mldv2_record), &off);
 2773                                 pmr = (struct mldv2_record *)(mtod(md,
 2774                                     uint8_t *) + off);
 2775                         } else {
 2776                                 /* current packet; offset from last append */
 2777                                 md = m_last(m);
 2778                                 pmr = (struct mldv2_record *)(mtod(md,
 2779                                     uint8_t *) + md->m_len -
 2780                                     sizeof(struct mldv2_record));
 2781                         }
 2782                         /*
 2783                          * Begin walking the tree for this record type
 2784                          * pass, or continue from where we left off
 2785                          * previously if we had to allocate a new packet.
 2786                          * Only report deltas in-mode at t1.
 2787                          * We need not report included sources as allowed
 2788                          * if we are in inclusive mode on the group,
 2789                          * however the converse is not true.
 2790                          */
 2791                         rsrcs = 0;
 2792                         if (nims == NULL) {
 2793                                 nims = RB_MIN(ip6_msource_tree,
 2794                                     &inm->in6m_srcs);
 2795                         }
 2796                         RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
 2797                                 CTR2(KTR_MLD, "%s: visit node %s", __func__,
 2798                                     ip6_sprintf(ip6tbuf, &ims->im6s_addr));
 2799                                 now = im6s_get_mode(inm, ims, 1);
 2800                                 then = im6s_get_mode(inm, ims, 0);
 2801                                 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
 2802                                     __func__, then, now);
 2803                                 if (now == then) {
 2804                                         CTR1(KTR_MLD,
 2805                                             "%s: skip unchanged", __func__);
 2806                                         continue;
 2807                                 }
 2808                                 if (mode == MCAST_EXCLUDE &&
 2809                                     now == MCAST_INCLUDE) {
 2810                                         CTR1(KTR_MLD,
 2811                                             "%s: skip IN src on EX group",
 2812                                             __func__);
 2813                                         continue;
 2814                                 }
 2815                                 nrt = (rectype_t)now;
 2816                                 if (nrt == REC_NONE)
 2817                                         nrt = (rectype_t)(~mode & REC_FULL);
 2818                                 if (schanged++ == 0) {
 2819                                         crt = nrt;
 2820                                 } else if (crt != nrt)
 2821                                         continue;
 2822                                 if (!m_append(m, sizeof(struct in6_addr),
 2823                                     (void *)&ims->im6s_addr)) {
 2824                                         if (m != m0)
 2825                                                 m_freem(m);
 2826                                         CTR1(KTR_MLD,
 2827                                             "%s: m_append() failed", __func__);
 2828                                         return (-ENOMEM);
 2829                                 }
 2830                                 nallow += !!(crt == REC_ALLOW);
 2831                                 nblock += !!(crt == REC_BLOCK);
 2832                                 if (++rsrcs == m0srcs)
 2833                                         break;
 2834                         }
 2835                         /*
 2836                          * If we did not append any tree nodes on this
 2837                          * pass, back out of allocations.
 2838                          */
 2839                         if (rsrcs == 0) {
 2840                                 npbytes -= sizeof(struct mldv2_record);
 2841                                 if (m != m0) {
 2842                                         CTR1(KTR_MLD,
 2843                                             "%s: m_free(m)", __func__);
 2844                                         m_freem(m);
 2845                                 } else {
 2846                                         CTR1(KTR_MLD,
 2847                                             "%s: m_adj(m, -mr)", __func__);
 2848                                         m_adj(m, -((int)sizeof(
 2849                                             struct mldv2_record)));
 2850                                 }
 2851                                 continue;
 2852                         }
 2853                         npbytes += (rsrcs * sizeof(struct in6_addr));
 2854                         if (crt == REC_ALLOW)
 2855                                 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
 2856                         else if (crt == REC_BLOCK)
 2857                                 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
 2858                         pmr->mr_numsrc = htons(rsrcs);
 2859                         /*
 2860                          * Count the new group record, and enqueue this
 2861                          * packet if it wasn't already queued.
 2862                          */
 2863                         m->m_pkthdr.PH_vt.vt_nrecs++;
 2864                         if (m != m0)
 2865                                 mbufq_enqueue(mq, m);
 2866                         nbytes += npbytes;
 2867                 } while (nims != NULL);
 2868                 drt |= crt;
 2869                 crt = (~crt & REC_FULL);
 2870         }
 2871 
 2872         CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
 2873             nallow, nblock);
 2874 
 2875         return (nbytes);
 2876 }
 2877 
 2878 static int
 2879 mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
 2880 {
 2881         struct mbufq    *gq;
 2882         struct mbuf     *m;             /* pending state-change */
 2883         struct mbuf     *m0;            /* copy of pending state-change */
 2884         struct mbuf     *mt;            /* last state-change in packet */
 2885         int              docopy, domerge;
 2886         u_int            recslen;
 2887 
 2888         docopy = 0;
 2889         domerge = 0;
 2890         recslen = 0;
 2891 
 2892         IN6_MULTI_LIST_LOCK_ASSERT();
 2893         MLD_LOCK_ASSERT();
 2894 
 2895         /*
 2896          * If there are further pending retransmissions, make a writable
 2897          * copy of each queued state-change message before merging.
 2898          */
 2899         if (inm->in6m_scrv > 0)
 2900                 docopy = 1;
 2901 
 2902         gq = &inm->in6m_scq;
 2903 #ifdef KTR
 2904         if (mbufq_first(gq) == NULL) {
 2905                 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
 2906                     __func__, inm);
 2907         }
 2908 #endif
 2909 
 2910         m = mbufq_first(gq);
 2911         while (m != NULL) {
 2912                 /*
 2913                  * Only merge the report into the current packet if
 2914                  * there is sufficient space to do so; an MLDv2 report
 2915                  * packet may only contain 65,535 group records.
 2916                  * Always use a simple mbuf chain concatentation to do this,
 2917                  * as large state changes for single groups may have
 2918                  * allocated clusters.
 2919                  */
 2920                 domerge = 0;
 2921                 mt = mbufq_last(scq);
 2922                 if (mt != NULL) {
 2923                         recslen = m_length(m, NULL);
 2924 
 2925                         if ((mt->m_pkthdr.PH_vt.vt_nrecs +
 2926                             m->m_pkthdr.PH_vt.vt_nrecs <=
 2927                             MLD_V2_REPORT_MAXRECS) &&
 2928                             (mt->m_pkthdr.len + recslen <=
 2929                             (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
 2930                                 domerge = 1;
 2931                 }
 2932 
 2933                 if (!domerge && mbufq_full(gq)) {
 2934                         CTR2(KTR_MLD,
 2935                             "%s: outbound queue full, skipping whole packet %p",
 2936                             __func__, m);
 2937                         mt = m->m_nextpkt;
 2938                         if (!docopy)
 2939                                 m_freem(m);
 2940                         m = mt;
 2941                         continue;
 2942                 }
 2943 
 2944                 if (!docopy) {
 2945                         CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
 2946                         m0 = mbufq_dequeue(gq);
 2947                         m = m0->m_nextpkt;
 2948                 } else {
 2949                         CTR2(KTR_MLD, "%s: copying %p", __func__, m);
 2950                         m0 = m_dup(m, M_NOWAIT);
 2951                         if (m0 == NULL)
 2952                                 return (ENOMEM);
 2953                         m0->m_nextpkt = NULL;
 2954                         m = m->m_nextpkt;
 2955                 }
 2956 
 2957                 if (!domerge) {
 2958                         CTR3(KTR_MLD, "%s: queueing %p to scq %p)",
 2959                             __func__, m0, scq);
 2960                         mbufq_enqueue(scq, m0);
 2961                 } else {
 2962                         struct mbuf *mtl;       /* last mbuf of packet mt */
 2963 
 2964                         CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p)",
 2965                             __func__, m0, mt);
 2966 
 2967                         mtl = m_last(mt);
 2968                         m0->m_flags &= ~M_PKTHDR;
 2969                         mt->m_pkthdr.len += recslen;
 2970                         mt->m_pkthdr.PH_vt.vt_nrecs +=
 2971                             m0->m_pkthdr.PH_vt.vt_nrecs;
 2972 
 2973                         mtl->m_next = m0;
 2974                 }
 2975         }
 2976 
 2977         return (0);
 2978 }
 2979 
 2980 /*
 2981  * Respond to a pending MLDv2 General Query.
 2982  */
 2983 static void
 2984 mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
 2985 {
 2986         struct ifmultiaddr      *ifma;
 2987         struct ifnet            *ifp;
 2988         struct in6_multi        *inm;
 2989         int                      retval;
 2990 
 2991         IN6_MULTI_LIST_LOCK_ASSERT();
 2992         MLD_LOCK_ASSERT();
 2993 
 2994         KASSERT(mli->mli_version == MLD_VERSION_2,
 2995             ("%s: called when version %d", __func__, mli->mli_version));
 2996 
 2997         /*
 2998          * Check that there are some packets queued. If so, send them first.
 2999          * For large number of groups the reply to general query can take
 3000          * many packets, we should finish sending them before starting of
 3001          * queuing the new reply.
 3002          */
 3003         if (mbufq_len(&mli->mli_gq) != 0)
 3004                 goto send;
 3005 
 3006         ifp = mli->mli_ifp;
 3007 
 3008         IF_ADDR_RLOCK(ifp);
 3009         CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 3010                 if (ifma->ifma_addr->sa_family != AF_INET6 ||
 3011                     ifma->ifma_protospec == NULL)
 3012                         continue;
 3013 
 3014                 inm = (struct in6_multi *)ifma->ifma_protospec;
 3015                 KASSERT(ifp == inm->in6m_ifp,
 3016                     ("%s: inconsistent ifp", __func__));
 3017 
 3018                 switch (inm->in6m_state) {
 3019                 case MLD_NOT_MEMBER:
 3020                 case MLD_SILENT_MEMBER:
 3021                         break;
 3022                 case MLD_REPORTING_MEMBER:
 3023                 case MLD_IDLE_MEMBER:
 3024                 case MLD_LAZY_MEMBER:
 3025                 case MLD_SLEEPING_MEMBER:
 3026                 case MLD_AWAKENING_MEMBER:
 3027                         inm->in6m_state = MLD_REPORTING_MEMBER;
 3028                         retval = mld_v2_enqueue_group_record(&mli->mli_gq,
 3029                             inm, 0, 0, 0, 0);
 3030                         CTR2(KTR_MLD, "%s: enqueue record = %d",
 3031                             __func__, retval);
 3032                         break;
 3033                 case MLD_G_QUERY_PENDING_MEMBER:
 3034                 case MLD_SG_QUERY_PENDING_MEMBER:
 3035                 case MLD_LEAVING_MEMBER:
 3036                         break;
 3037                 }
 3038         }
 3039         IF_ADDR_RUNLOCK(ifp);
 3040 
 3041 send:
 3042         mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
 3043 
 3044         /*
 3045          * Slew transmission of bursts over 500ms intervals.
 3046          */
 3047         if (mbufq_first(&mli->mli_gq) != NULL) {
 3048                 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
 3049                     MLD_RESPONSE_BURST_INTERVAL);
 3050                 V_interface_timers_running6 = 1;
 3051         }
 3052 }
 3053 
 3054 /*
 3055  * Transmit the next pending message in the output queue.
 3056  *
 3057  * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
 3058  * MRT: Nothing needs to be done, as MLD traffic is always local to
 3059  * a link and uses a link-scope multicast address.
 3060  */
 3061 static void
 3062 mld_dispatch_packet(struct mbuf *m)
 3063 {
 3064         struct ip6_moptions      im6o;
 3065         struct ifnet            *ifp;
 3066         struct ifnet            *oifp;
 3067         struct mbuf             *m0;
 3068         struct mbuf             *md;
 3069         struct ip6_hdr          *ip6;
 3070         struct mld_hdr          *mld;
 3071         int                      error;
 3072         int                      off;
 3073         int                      type;
 3074         uint32_t                 ifindex;
 3075 
 3076         CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
 3077 
 3078         /*
 3079          * Set VNET image pointer from enqueued mbuf chain
 3080          * before doing anything else. Whilst we use interface
 3081          * indexes to guard against interface detach, they are
 3082          * unique to each VIMAGE and must be retrieved.
 3083          */
 3084         ifindex = mld_restore_context(m);
 3085 
 3086         /*
 3087          * Check if the ifnet still exists. This limits the scope of
 3088          * any race in the absence of a global ifp lock for low cost
 3089          * (an array lookup).
 3090          */
 3091         ifp = ifnet_byindex(ifindex);
 3092         if (ifp == NULL) {
 3093                 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
 3094                     __func__, m, ifindex);
 3095                 m_freem(m);
 3096                 IP6STAT_INC(ip6s_noroute);
 3097                 goto out;
 3098         }
 3099 
 3100         im6o.im6o_multicast_hlim  = 1;
 3101         im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
 3102         im6o.im6o_multicast_ifp = ifp;
 3103 
 3104         if (m->m_flags & M_MLDV1) {
 3105                 m0 = m;
 3106         } else {
 3107                 m0 = mld_v2_encap_report(ifp, m);
 3108                 if (m0 == NULL) {
 3109                         CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
 3110                         IP6STAT_INC(ip6s_odropped);
 3111                         goto out;
 3112                 }
 3113         }
 3114 
 3115         mld_scrub_context(m0);
 3116         m_clrprotoflags(m);
 3117         m0->m_pkthdr.rcvif = V_loif;
 3118 
 3119         ip6 = mtod(m0, struct ip6_hdr *);
 3120 #if 0
 3121         (void)in6_setscope(&ip6->ip6_dst, ifp, NULL);   /* XXX LOR */
 3122 #else
 3123         /*
 3124          * XXX XXX Break some KPI rules to prevent an LOR which would
 3125          * occur if we called in6_setscope() at transmission.
 3126          * See comments at top of file.
 3127          */
 3128         MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
 3129 #endif
 3130 
 3131         /*
 3132          * Retrieve the ICMPv6 type before handoff to ip6_output(),
 3133          * so we can bump the stats.
 3134          */
 3135         md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
 3136         mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
 3137         type = mld->mld_type;
 3138 
 3139         error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
 3140             &oifp, NULL);
 3141         if (error) {
 3142                 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
 3143                 goto out;
 3144         }
 3145         ICMP6STAT_INC(icp6s_outhist[type]);
 3146         if (oifp != NULL) {
 3147                 icmp6_ifstat_inc(oifp, ifs6_out_msg);
 3148                 switch (type) {
 3149                 case MLD_LISTENER_REPORT:
 3150                 case MLDV2_LISTENER_REPORT:
 3151                         icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
 3152                         break;
 3153                 case MLD_LISTENER_DONE:
 3154                         icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
 3155                         break;
 3156                 }
 3157         }
 3158 out:
 3159         return;
 3160 }
 3161 
 3162 /*
 3163  * Encapsulate an MLDv2 report.
 3164  *
 3165  * KAME IPv6 requires that hop-by-hop options be passed separately,
 3166  * and that the IPv6 header be prepended in a separate mbuf.
 3167  *
 3168  * Returns a pointer to the new mbuf chain head, or NULL if the
 3169  * allocation failed.
 3170  */
 3171 static struct mbuf *
 3172 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
 3173 {
 3174         struct mbuf             *mh;
 3175         struct mldv2_report     *mld;
 3176         struct ip6_hdr          *ip6;
 3177         struct in6_ifaddr       *ia;
 3178         int                      mldreclen;
 3179 
 3180         KASSERT(ifp != NULL, ("%s: null ifp", __func__));
 3181         KASSERT((m->m_flags & M_PKTHDR),
 3182             ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
 3183 
 3184         /*
 3185          * RFC3590: OK to send as :: or tentative during DAD.
 3186          */
 3187         ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
 3188         if (ia == NULL)
 3189                 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
 3190 
 3191         mh = m_gethdr(M_NOWAIT, MT_DATA);
 3192         if (mh == NULL) {
 3193                 if (ia != NULL)
 3194                         ifa_free(&ia->ia_ifa);
 3195                 m_freem(m);
 3196                 return (NULL);
 3197         }
 3198         M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
 3199 
 3200         mldreclen = m_length(m, NULL);
 3201         CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
 3202 
 3203         mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
 3204         mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
 3205             sizeof(struct mldv2_report) + mldreclen;
 3206 
 3207         ip6 = mtod(mh, struct ip6_hdr *);
 3208         ip6->ip6_flow = 0;
 3209         ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
 3210         ip6->ip6_vfc |= IPV6_VERSION;
 3211         ip6->ip6_nxt = IPPROTO_ICMPV6;
 3212         ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
 3213         if (ia != NULL)
 3214                 ifa_free(&ia->ia_ifa);
 3215         ip6->ip6_dst = in6addr_linklocal_allv2routers;
 3216         /* scope ID will be set in netisr */
 3217 
 3218         mld = (struct mldv2_report *)(ip6 + 1);
 3219         mld->mld_type = MLDV2_LISTENER_REPORT;
 3220         mld->mld_code = 0;
 3221         mld->mld_cksum = 0;
 3222         mld->mld_v2_reserved = 0;
 3223         mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
 3224         m->m_pkthdr.PH_vt.vt_nrecs = 0;
 3225 
 3226         mh->m_next = m;
 3227         mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
 3228             sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
 3229         return (mh);
 3230 }
 3231 
 3232 #ifdef KTR
 3233 static char *
 3234 mld_rec_type_to_str(const int type)
 3235 {
 3236 
 3237         switch (type) {
 3238                 case MLD_CHANGE_TO_EXCLUDE_MODE:
 3239                         return "TO_EX";
 3240                         break;
 3241                 case MLD_CHANGE_TO_INCLUDE_MODE:
 3242                         return "TO_IN";
 3243                         break;
 3244                 case MLD_MODE_IS_EXCLUDE:
 3245                         return "MODE_EX";
 3246                         break;
 3247                 case MLD_MODE_IS_INCLUDE:
 3248                         return "MODE_IN";
 3249                         break;
 3250                 case MLD_ALLOW_NEW_SOURCES:
 3251                         return "ALLOW_NEW";
 3252                         break;
 3253                 case MLD_BLOCK_OLD_SOURCES:
 3254                         return "BLOCK_OLD";
 3255                         break;
 3256                 default:
 3257                         break;
 3258         }
 3259         return "unknown";
 3260 }
 3261 #endif
 3262 
 3263 static void
 3264 mld_init(void *unused __unused)
 3265 {
 3266 
 3267         CTR1(KTR_MLD, "%s: initializing", __func__);
 3268         MLD_LOCK_INIT();
 3269 
 3270         ip6_initpktopts(&mld_po);
 3271         mld_po.ip6po_hlim = 1;
 3272         mld_po.ip6po_hbh = &mld_ra.hbh;
 3273         mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
 3274         mld_po.ip6po_flags = IP6PO_DONTFRAG;
 3275 }
 3276 SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
 3277 
 3278 static void
 3279 mld_uninit(void *unused __unused)
 3280 {
 3281 
 3282         CTR1(KTR_MLD, "%s: tearing down", __func__);
 3283         MLD_LOCK_DESTROY();
 3284 }
 3285 SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
 3286 
 3287 static void
 3288 vnet_mld_init(const void *unused __unused)
 3289 {
 3290 
 3291         CTR1(KTR_MLD, "%s: initializing", __func__);
 3292 
 3293         LIST_INIT(&V_mli_head);
 3294 }
 3295 VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
 3296     NULL);
 3297 
 3298 static void
 3299 vnet_mld_uninit(const void *unused __unused)
 3300 {
 3301 
 3302         /* This can happen if we shutdown the network stack. */
 3303         CTR1(KTR_MLD, "%s: tearing down", __func__);
 3304 }
 3305 VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
 3306     NULL);
 3307 
 3308 static int
 3309 mld_modevent(module_t mod, int type, void *unused __unused)
 3310 {
 3311 
 3312     switch (type) {
 3313     case MOD_LOAD:
 3314     case MOD_UNLOAD:
 3315         break;
 3316     default:
 3317         return (EOPNOTSUPP);
 3318     }
 3319     return (0);
 3320 }
 3321 
 3322 static moduledata_t mld_mod = {
 3323     "mld",
 3324     mld_modevent,
 3325     0
 3326 };
 3327 DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);

Cache object: 3115a0c68c6f42705287a32e88e89855


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.