FreeBSD/Linux Kernel Cross Reference
sys/netinet/ip_mroute.c


    1 /*-
    2  * Copyright (c) 1989 Stephen Deering
    3  * Copyright (c) 1992, 1993
    4  *      The Regents of the University of California.  All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * Stephen Deering of Stanford University.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 4. Neither the name of the University nor the names of its contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   31  * SUCH DAMAGE.
   32  *
   33  *      @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
   34  */
   35 
   36 /*
   37  * IP multicast forwarding procedures
   38  *
   39  * Written by David Waitzman, BBN Labs, August 1988.
   40  * Modified by Steve Deering, Stanford, February 1989.
   41  * Modified by Mark J. Steiglitz, Stanford, May, 1991
   42  * Modified by Van Jacobson, LBL, January 1993
   43  * Modified by Ajit Thyagarajan, PARC, August 1993
   44  * Modified by Bill Fenner, PARC, April 1995
   45  * Modified by Ahmed Helmy, SGI, June 1996
   46  * Modified by George Edmond Eddy (Rusty), ISI, February 1998
   47  * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
   48  * Modified by Hitoshi Asaeda, WIDE, August 2000
   49  * Modified by Pavlin Radoslavov, ICSI, October 2002
   50  *
   51  * MROUTING Revision: 3.5
   52  * and PIM-SMv2 and PIM-DM support, advanced API support,
   53  * bandwidth metering and signaling
   54  */
   55 
   56 /*
   57  * TODO: Prefix functions with ipmf_.
   58  * TODO: Maintain a refcount on if_allmulti() in ifnet or in the protocol
   59  * domain attachment (if_afdata) so we can track consumers of that service.
   60  * TODO: Deprecate routing socket path for SIOCGETSGCNT and SIOCGETVIFCNT,
   61  * move it to socket options.
   62  * TODO: Cleanup LSRR removal further.
   63  * TODO: Push RSVP stubs into raw_ip.c.
   64  * TODO: Use bitstring.h for vif set.
   65  * TODO: Fix mrt6_ioctl dangling ref when dynamically loaded.
   66  * TODO: Sync ip6_mroute.c with this file.
   67  */
   68 
   69 #include <sys/cdefs.h>
   70 __FBSDID("$FreeBSD$");
   71 
   72 #include "opt_inet.h"
   73 #include "opt_mrouting.h"
   74 
   75 #define _PIM_VT 1
   76 
   77 #include <sys/param.h>
   78 #include <sys/kernel.h>
   79 #include <sys/stddef.h>
   80 #include <sys/lock.h>
   81 #include <sys/ktr.h>
   82 #include <sys/malloc.h>
   83 #include <sys/mbuf.h>
   84 #include <sys/module.h>
   85 #include <sys/priv.h>
   86 #include <sys/protosw.h>
   87 #include <sys/signalvar.h>
   88 #include <sys/socket.h>
   89 #include <sys/socketvar.h>
   90 #include <sys/sockio.h>
   91 #include <sys/sx.h>
   92 #include <sys/sysctl.h>
   93 #include <sys/syslog.h>
   94 #include <sys/systm.h>
   95 #include <sys/time.h>
   96 
   97 #include <net/if.h>
   98 #include <net/netisr.h>
   99 #include <net/route.h>
  100 #include <net/vnet.h>
  101 
  102 #include <netinet/in.h>
  103 #include <netinet/igmp.h>
  104 #include <netinet/in_systm.h>
  105 #include <netinet/in_var.h>
  106 #include <netinet/ip.h>
  107 #include <netinet/ip_encap.h>
  108 #include <netinet/ip_mroute.h>
  109 #include <netinet/ip_var.h>
  110 #include <netinet/ip_options.h>
  111 #include <netinet/pim.h>
  112 #include <netinet/pim_var.h>
  113 #include <netinet/udp.h>
  114 
  115 #include <machine/in_cksum.h>
  116 
  117 #ifndef KTR_IPMF
  118 #define KTR_IPMF KTR_INET
  119 #endif
  120 
  121 #define         VIFI_INVALID    ((vifi_t) -1)
  122 #define         M_HASCL(m)      ((m)->m_flags & M_EXT)
  123 
  124 static VNET_DEFINE(uint32_t, last_tv_sec); /* last time we processed this */
  125 #define V_last_tv_sec   VNET(last_tv_sec)
  126 
  127 static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast forwarding cache");
  128 
  129 /*
  130  * Locking.  We use two locks: one for the virtual interface table and
  131  * one for the forwarding table.  These locks may be nested in which case
  132  * the VIF lock must always be taken first.  Note that each lock is used
  133  * to cover not only the specific data structure but also related data
  134  * structures.
  135  */
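       /*
        * Editor's note: a minimal sketch of the nesting used throughout this
        * file (see add_mfc(), if_detached_event() and X_ip_mforward() below):
        *
        *      VIF_LOCK();
        *      MFC_LOCK();
        *      ... modify vif and forwarding-cache state ...
        *      MFC_UNLOCK();
        *      VIF_UNLOCK();
        *
        * mrouter_mtx (below) additionally serializes MRT_INIT/MRT_DONE
        * against everything else.
        */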
  136 
  137 static struct mtx mrouter_mtx;
  138 #define MROUTER_LOCK()          mtx_lock(&mrouter_mtx)
  139 #define MROUTER_UNLOCK()        mtx_unlock(&mrouter_mtx)
  140 #define MROUTER_LOCK_ASSERT()   mtx_assert(&mrouter_mtx, MA_OWNED)
  141 #define MROUTER_LOCK_INIT()                                             \
  142         mtx_init(&mrouter_mtx, "IPv4 multicast forwarding", NULL, MTX_DEF)
  143 #define MROUTER_LOCK_DESTROY()  mtx_destroy(&mrouter_mtx)
  144 
  145 static int ip_mrouter_cnt;      /* # of vnets with active mrouters */
  146 static int ip_mrouter_unloading; /* Allow no more V_ip_mrouter sockets */
  147 
  148 static VNET_DEFINE(struct mrtstat, mrtstat);
  149 #define V_mrtstat               VNET(mrtstat)
  150 SYSCTL_VNET_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
  151     &VNET_NAME(mrtstat), mrtstat,
  152     "IPv4 Multicast Forwarding Statistics (struct mrtstat, "
  153     "netinet/ip_mroute.h)");
  154 
  155 static VNET_DEFINE(u_long, mfchash);
  156 #define V_mfchash               VNET(mfchash)
  157 #define MFCHASH(a, g)                                                   \
  158         ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \
  159           ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & V_mfchash)
  160 #define MFCHASHSIZE     256
  161 
  162 static u_long mfchashsize;                      /* Hash size */
  163 static VNET_DEFINE(u_char *, nexpire);          /* 0..mfchashsize-1 */
  164 #define V_nexpire               VNET(nexpire)
  165 static VNET_DEFINE(LIST_HEAD(mfchashhdr, mfc)*, mfchashtbl);
  166 #define V_mfchashtbl            VNET(mfchashtbl)
  167 
  168 static struct mtx mfc_mtx;
  169 #define MFC_LOCK()              mtx_lock(&mfc_mtx)
  170 #define MFC_UNLOCK()            mtx_unlock(&mfc_mtx)
  171 #define MFC_LOCK_ASSERT()       mtx_assert(&mfc_mtx, MA_OWNED)
  172 #define MFC_LOCK_INIT()                                                 \
  173         mtx_init(&mfc_mtx, "IPv4 multicast forwarding cache", NULL, MTX_DEF)
  174 #define MFC_LOCK_DESTROY()      mtx_destroy(&mfc_mtx)
  175 
  176 static VNET_DEFINE(vifi_t, numvifs);
  177 #define V_numvifs               VNET(numvifs)
  178 static VNET_DEFINE(struct vif, viftable[MAXVIFS]);
  179 #define V_viftable              VNET(viftable)
  180 SYSCTL_VNET_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD,
  181     &VNET_NAME(viftable), sizeof(V_viftable), "S,vif[MAXVIFS]",
  182     "IPv4 Multicast Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");
  183 
  184 static struct mtx vif_mtx;
  185 #define VIF_LOCK()              mtx_lock(&vif_mtx)
  186 #define VIF_UNLOCK()            mtx_unlock(&vif_mtx)
  187 #define VIF_LOCK_ASSERT()       mtx_assert(&vif_mtx, MA_OWNED)
  188 #define VIF_LOCK_INIT()                                                 \
  189         mtx_init(&vif_mtx, "IPv4 multicast interfaces", NULL, MTX_DEF)
  190 #define VIF_LOCK_DESTROY()      mtx_destroy(&vif_mtx)
  191 
  192 static eventhandler_tag if_detach_event_tag = NULL;
  193 
  194 static VNET_DEFINE(struct callout, expire_upcalls_ch);
  195 #define V_expire_upcalls_ch     VNET(expire_upcalls_ch)
  196 
  197 #define         EXPIRE_TIMEOUT  (hz / 4)        /* 4x / second          */
  198 #define         UPCALL_EXPIRE   6               /* number of timeouts   */
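       /*
        * Editor's note: together these mean an unresolved upcall entry is
        * torn down after UPCALL_EXPIRE expirations of EXPIRE_TIMEOUT, i.e.
        * 6 * (hz / 4) ticks, roughly 1.5 seconds.
        */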
  199 
  200 /*
  201  * Bandwidth meter variables and constants
  202  */
  203 static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
  204 /*
  205  * Pending timeouts are stored in a hash table, the key being the
  206  * expiration time. Periodically, the entries are analysed and processed.
  207  */
  208 #define BW_METER_BUCKETS        1024
  209 static VNET_DEFINE(struct bw_meter*, bw_meter_timers[BW_METER_BUCKETS]);
  210 #define V_bw_meter_timers       VNET(bw_meter_timers)
  211 static VNET_DEFINE(struct callout, bw_meter_ch);
  212 #define V_bw_meter_ch           VNET(bw_meter_ch)
  213 #define BW_METER_PERIOD (hz)            /* periodical handling of bw meters */
  214 
  215 /*
  216  * Pending upcalls are stored in a vector which is flushed when
  217  * full, or periodically
  218  */
  219 static VNET_DEFINE(struct bw_upcall, bw_upcalls[BW_UPCALLS_MAX]);
  220 #define V_bw_upcalls            VNET(bw_upcalls)
  221 static VNET_DEFINE(u_int, bw_upcalls_n); /* # of pending upcalls */
  222 #define V_bw_upcalls_n          VNET(bw_upcalls_n)
  223 static VNET_DEFINE(struct callout, bw_upcalls_ch);
  224 #define V_bw_upcalls_ch         VNET(bw_upcalls_ch)
  225 
  226 #define BW_UPCALLS_PERIOD (hz)          /* periodical flush of bw upcalls */
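       /*
        * Editor's note: a hedged sketch of how a routing daemon would arm a
        * bandwidth meter from userland; field and flag names are those of
        * struct bw_upcall in netinet/ip_mroute.h, the descriptor and all
        * values are hypothetical:
        *
        *      struct bw_upcall bu;
        *      memset(&bu, 0, sizeof(bu));
        *      bu.bu_src = source;
        *      bu.bu_dst = group;
        *      bu.bu_flags = BW_UPCALL_UNIT_BYTES | BW_UPCALL_GEQ;
        *      bu.bu_threshold.b_time.tv_sec = 1;      // measurement window
        *      bu.bu_threshold.b_bytes = 100000;       // threshold to signal
        *      setsockopt(mrouter_fd, IPPROTO_IP, MRT_ADD_BW_UPCALL,
        *          &bu, sizeof(bu));
        *
        * Matching meters are reported back to the mrouting socket by
        * bw_upcalls_send() below.
        */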
  227 
  228 static VNET_DEFINE(struct pimstat, pimstat);
  229 #define V_pimstat               VNET(pimstat)
  230 
  231 SYSCTL_NODE(_net_inet, IPPROTO_PIM, pim, CTLFLAG_RW, 0, "PIM");
  232 SYSCTL_VNET_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD,
  233     &VNET_NAME(pimstat), pimstat,
  234     "PIM Statistics (struct pimstat, netinet/pim_var.h)");
  235 
  236 static u_long   pim_squelch_wholepkt = 0;
  237 SYSCTL_ULONG(_net_inet_pim, OID_AUTO, squelch_wholepkt, CTLFLAG_RW,
  238     &pim_squelch_wholepkt, 0,
  239     "Disable IGMP_WHOLEPKT notifications if rendezvous point is unspecified");
  240 
  241 extern  struct domain inetdomain;
  242 static const struct protosw in_pim_protosw = {
  243         .pr_type =              SOCK_RAW,
  244         .pr_domain =            &inetdomain,
  245         .pr_protocol =          IPPROTO_PIM,
  246         .pr_flags =             PR_ATOMIC|PR_ADDR|PR_LASTHDR,
  247         .pr_input =             pim_input,
  248         .pr_output =            (pr_output_t*)rip_output,
  249         .pr_ctloutput =         rip_ctloutput,
  250         .pr_usrreqs =           &rip_usrreqs
  251 };
  252 static const struct encaptab *pim_encap_cookie;
  253 
  254 static int pim_encapcheck(const struct mbuf *, int, int, void *);
  255 
  256 /*
  257  * Note: the PIM Register encapsulation adds the following in front of a
  258  * data packet:
  259  *
  260  * struct pim_encap_hdr {
  261  *    struct ip ip;
  262  *    struct pim_encap_pimhdr  pim;
  263  * }
  264  *
  265  */
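       /*
        * Editor's note: with the definitions below, the encapsulation adds
        * sizeof(struct ip) + sizeof(struct pim_encap_pimhdr) = 20 + 8 = 28
        * bytes in front of the data packet, and the outer header is sent
        * with ttl PIM_ENCAP_TTL.
        */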
  266 
  267 struct pim_encap_pimhdr {
  268         struct pim pim;
  269         uint32_t   flags;
  270 };
  271 #define         PIM_ENCAP_TTL   64
  272 
  273 static struct ip pim_encap_iphdr = {
  274 #if BYTE_ORDER == LITTLE_ENDIAN
  275         sizeof(struct ip) >> 2,
  276         IPVERSION,
  277 #else
  278         IPVERSION,
  279         sizeof(struct ip) >> 2,
  280 #endif
  281         0,                      /* tos */
  282         sizeof(struct ip),      /* total length */
  283         0,                      /* id */
  284         0,                      /* frag offset */
  285         PIM_ENCAP_TTL,
  286         IPPROTO_PIM,
  287         0,                      /* checksum */
  288 };
  289 
  290 static struct pim_encap_pimhdr pim_encap_pimhdr = {
  291     {
  292         PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
  293         0,                      /* reserved */
  294         0,                      /* checksum */
  295     },
  296     0                           /* flags */
  297 };
  298 
  299 static VNET_DEFINE(vifi_t, reg_vif_num) = VIFI_INVALID;
  300 #define V_reg_vif_num           VNET(reg_vif_num)
  301 static VNET_DEFINE(struct ifnet, multicast_register_if);
  302 #define V_multicast_register_if VNET(multicast_register_if)
  303 
  304 /*
  305  * Private variables.
  306  */
  307 
  308 static u_long   X_ip_mcast_src(int);
  309 static int      X_ip_mforward(struct ip *, struct ifnet *, struct mbuf *,
  310                     struct ip_moptions *);
  311 static int      X_ip_mrouter_done(void);
  312 static int      X_ip_mrouter_get(struct socket *, struct sockopt *);
  313 static int      X_ip_mrouter_set(struct socket *, struct sockopt *);
  314 static int      X_legal_vif_num(int);
  315 static int      X_mrt_ioctl(u_long, caddr_t, int);
  316 
  317 static int      add_bw_upcall(struct bw_upcall *);
  318 static int      add_mfc(struct mfcctl2 *);
  319 static int      add_vif(struct vifctl *);
  320 static void     bw_meter_prepare_upcall(struct bw_meter *, struct timeval *);
  321 static void     bw_meter_process(void);
  322 static void     bw_meter_receive_packet(struct bw_meter *, int,
  323                     struct timeval *);
  324 static void     bw_upcalls_send(void);
  325 static int      del_bw_upcall(struct bw_upcall *);
  326 static int      del_mfc(struct mfcctl2 *);
  327 static int      del_vif(vifi_t);
  328 static int      del_vif_locked(vifi_t);
  329 static void     expire_bw_meter_process(void *);
  330 static void     expire_bw_upcalls_send(void *);
  331 static void     expire_mfc(struct mfc *);
  332 static void     expire_upcalls(void *);
  333 static void     free_bw_list(struct bw_meter *);
  334 static int      get_sg_cnt(struct sioc_sg_req *);
  335 static int      get_vif_cnt(struct sioc_vif_req *);
  336 static void     if_detached_event(void *, struct ifnet *);
  337 static int      ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
  338 static int      ip_mrouter_init(struct socket *, int);
  339 static __inline struct mfc *
  340                 mfc_find(struct in_addr *, struct in_addr *);
  341 static void     phyint_send(struct ip *, struct vif *, struct mbuf *);
  342 static struct mbuf *
  343                 pim_register_prepare(struct ip *, struct mbuf *);
  344 static int      pim_register_send(struct ip *, struct vif *,
  345                     struct mbuf *, struct mfc *);
  346 static int      pim_register_send_rp(struct ip *, struct vif *,
  347                     struct mbuf *, struct mfc *);
  348 static int      pim_register_send_upcall(struct ip *, struct vif *,
  349                     struct mbuf *, struct mfc *);
  350 static void     schedule_bw_meter(struct bw_meter *, struct timeval *);
  351 static void     send_packet(struct vif *, struct mbuf *);
  352 static int      set_api_config(uint32_t *);
  353 static int      set_assert(int);
  354 static int      socket_send(struct socket *, struct mbuf *,
  355                     struct sockaddr_in *);
  356 static void     unschedule_bw_meter(struct bw_meter *);
  357 
  358 /*
  359  * Kernel multicast forwarding API capabilities and setup.
  360  * If more API capabilities are added to the kernel, they should be
  361  * recorded in `mrt_api_support'.
  362  */
  363 #define MRT_API_VERSION         0x0305
  364 
  365 static const int mrt_api_version = MRT_API_VERSION;
  366 static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
  367                                          MRT_MFC_FLAGS_BORDER_VIF |
  368                                          MRT_MFC_RP |
  369                                          MRT_MFC_BW_UPCALL);
  370 static VNET_DEFINE(uint32_t, mrt_api_config);
  371 #define V_mrt_api_config        VNET(mrt_api_config)
  372 static VNET_DEFINE(int, pim_assert_enabled);
  373 #define V_pim_assert_enabled    VNET(pim_assert_enabled)
  374 static struct timeval pim_assert_interval = { 3, 0 };   /* Rate limit */
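       /*
        * Editor's note: a hedged sketch of the negotiation a daemon performs
        * right after MRT_INIT; option names are from this file, the
        * descriptor is hypothetical:
        *
        *      uint32_t v;
        *      socklen_t len = sizeof(v);
        *      getsockopt(fd, IPPROTO_IP, MRT_API_SUPPORT, &v, &len);
        *      v &= MRT_MFC_BW_UPCALL;         // keep only what we need
        *      setsockopt(fd, IPPROTO_IP, MRT_API_CONFIG, &v, sizeof(v));
        *      // v is copied back holding the subset the kernel accepted
        */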
  375 
  376 /*
  377  * Find a route for a given origin IP address and multicast group address.
  378  * Statistics must be updated by the caller.
  379  */
  380 static __inline struct mfc *
  381 mfc_find(struct in_addr *o, struct in_addr *g)
  382 {
  383         struct mfc *rt;
  384 
  385         MFC_LOCK_ASSERT();
  386 
  387         LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
  388                 if (in_hosteq(rt->mfc_origin, *o) &&
  389                     in_hosteq(rt->mfc_mcastgrp, *g) &&
  390                     TAILQ_EMPTY(&rt->mfc_stall))
  391                         break;
  392         }
  393 
  394         return (rt);
  395 }
  396 
  397 /*
  398  * Handle MRT setsockopt commands to modify the multicast forwarding tables.
  399  */
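       /*
        * Editor's note: the expected call sequence from a daemon such as
        * mrouted or pimd, sketched with hypothetical values:
        *
        *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
        *      int v = 1;                      // MRT version, must be 1
        *      setsockopt(s, IPPROTO_IP, MRT_INIT, &v, sizeof(v));
        *      ... MRT_ADD_VIF / MRT_ADD_MFC / MRT_ADD_BW_UPCALL ...
        *      setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
        */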
  400 static int
  401 X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
  402 {
  403     int error, optval;
  404     vifi_t      vifi;
  405     struct      vifctl vifc;
  406     struct      mfcctl2 mfc;
  407     struct      bw_upcall bw_upcall;
  408     uint32_t    i;
  409 
  410     if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
  411         return EPERM;
  412 
  413     error = 0;
  414     switch (sopt->sopt_name) {
  415     case MRT_INIT:
  416         error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
  417         if (error)
  418             break;
  419         error = ip_mrouter_init(so, optval);
  420         break;
  421 
  422     case MRT_DONE:
  423         error = ip_mrouter_done();
  424         break;
  425 
  426     case MRT_ADD_VIF:
  427         error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
  428         if (error)
  429             break;
  430         error = add_vif(&vifc);
  431         break;
  432 
  433     case MRT_DEL_VIF:
  434         error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
  435         if (error)
  436             break;
  437         error = del_vif(vifi);
  438         break;
  439 
  440     case MRT_ADD_MFC:
  441     case MRT_DEL_MFC:
  442         /*
  443          * select data size depending on API version.
  444          */
  445         if (sopt->sopt_name == MRT_ADD_MFC &&
  446                 V_mrt_api_config & MRT_API_FLAGS_ALL) {
  447             error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
  448                                 sizeof(struct mfcctl2));
  449         } else {
  450             error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
  451                                 sizeof(struct mfcctl));
  452             bzero((caddr_t)&mfc + sizeof(struct mfcctl),
  453                         sizeof(mfc) - sizeof(struct mfcctl));
  454         }
  455         if (error)
  456             break;
  457         if (sopt->sopt_name == MRT_ADD_MFC)
  458             error = add_mfc(&mfc);
  459         else
  460             error = del_mfc(&mfc);
  461         break;
  462 
  463     case MRT_ASSERT:
  464         error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
  465         if (error)
  466             break;
  467         set_assert(optval);
  468         break;
  469 
  470     case MRT_API_CONFIG:
  471         error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
  472         if (!error)
  473             error = set_api_config(&i);
  474         if (!error)
  475             error = sooptcopyout(sopt, &i, sizeof i);
  476         break;
  477 
  478     case MRT_ADD_BW_UPCALL:
  479     case MRT_DEL_BW_UPCALL:
  480         error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
  481                                 sizeof bw_upcall);
  482         if (error)
  483             break;
  484         if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
  485             error = add_bw_upcall(&bw_upcall);
  486         else
  487             error = del_bw_upcall(&bw_upcall);
  488         break;
  489 
  490     default:
  491         error = EOPNOTSUPP;
  492         break;
  493     }
  494     return error;
  495 }
  496 
  497 /*
  498  * Handle MRT getsockopt commands
  499  */
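       /*
        * Editor's note: e.g. getsockopt(s, IPPROTO_IP, MRT_VERSION, ...)
        * returns mrt_api_version (0x0305, i.e. MROUTING revision 3.5).
        */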
  500 static int
  501 X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
  502 {
  503     int error;
  504 
  505     switch (sopt->sopt_name) {
  506     case MRT_VERSION:
  507         error = sooptcopyout(sopt, &mrt_api_version, sizeof mrt_api_version);
  508         break;
  509 
  510     case MRT_ASSERT:
  511         error = sooptcopyout(sopt, &V_pim_assert_enabled,
  512             sizeof V_pim_assert_enabled);
  513         break;
  514 
  515     case MRT_API_SUPPORT:
  516         error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
  517         break;
  518 
  519     case MRT_API_CONFIG:
  520         error = sooptcopyout(sopt, &V_mrt_api_config, sizeof V_mrt_api_config);
  521         break;
  522 
  523     default:
  524         error = EOPNOTSUPP;
  525         break;
  526     }
  527     return error;
  528 }
  529 
  530 /*
  531  * Handle ioctl commands to obtain information from the cache
  532  */
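       /*
        * Editor's note: a hedged sketch of the SIOCGETSGCNT path; the fields
        * are those used by get_sg_cnt() below, the descriptor and addresses
        * are hypothetical:
        *
        *      struct sioc_sg_req req;
        *      memset(&req, 0, sizeof(req));
        *      req.src = source;
        *      req.grp = group;
        *      ioctl(fd, SIOCGETSGCNT, &req);
        *      // req.pktcnt, req.bytecnt and req.wrong_if now hold the counts
        */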
  533 static int
  534 X_mrt_ioctl(u_long cmd, caddr_t data, int fibnum __unused)
  535 {
  536     int error = 0;
  537 
  538     /*
  539      * Currently the only function calling this ioctl routine is rtioctl().
  540      * Typically, only root can create the raw socket in order to execute
   541      * this ioctl method; however, the request might be coming from a prison.
  542      */
  543     error = priv_check(curthread, PRIV_NETINET_MROUTE);
  544     if (error)
  545         return (error);
  546     switch (cmd) {
  547     case (SIOCGETVIFCNT):
  548         error = get_vif_cnt((struct sioc_vif_req *)data);
  549         break;
  550 
  551     case (SIOCGETSGCNT):
  552         error = get_sg_cnt((struct sioc_sg_req *)data);
  553         break;
  554 
  555     default:
  556         error = EINVAL;
  557         break;
  558     }
  559     return error;
  560 }
  561 
  562 /*
   563  * returns the packet, byte, and rpf-failure counts for the source/group provided
  564  */
  565 static int
  566 get_sg_cnt(struct sioc_sg_req *req)
  567 {
  568     struct mfc *rt;
  569 
  570     MFC_LOCK();
  571     rt = mfc_find(&req->src, &req->grp);
  572     if (rt == NULL) {
  573         MFC_UNLOCK();
  574         req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
  575         return EADDRNOTAVAIL;
  576     }
  577     req->pktcnt = rt->mfc_pkt_cnt;
  578     req->bytecnt = rt->mfc_byte_cnt;
  579     req->wrong_if = rt->mfc_wrong_if;
  580     MFC_UNLOCK();
  581     return 0;
  582 }
  583 
  584 /*
  585  * returns the input and output packet and byte counts on the vif provided
  586  */
  587 static int
  588 get_vif_cnt(struct sioc_vif_req *req)
  589 {
  590     vifi_t vifi = req->vifi;
  591 
  592     VIF_LOCK();
  593     if (vifi >= V_numvifs) {
  594         VIF_UNLOCK();
  595         return EINVAL;
  596     }
  597 
  598     req->icount = V_viftable[vifi].v_pkt_in;
  599     req->ocount = V_viftable[vifi].v_pkt_out;
  600     req->ibytes = V_viftable[vifi].v_bytes_in;
  601     req->obytes = V_viftable[vifi].v_bytes_out;
  602     VIF_UNLOCK();
  603 
  604     return 0;
  605 }
  606 
  607 static void
  608 if_detached_event(void *arg __unused, struct ifnet *ifp)
  609 {
  610     vifi_t vifi;
  611     u_long i;
  612 
  613     MROUTER_LOCK();
  614 
  615     if (V_ip_mrouter == NULL) {
  616         MROUTER_UNLOCK();
  617         return;
  618     }
  619 
  620     VIF_LOCK();
  621     MFC_LOCK();
  622 
  623     /*
  624      * Tear down multicast forwarder state associated with this ifnet.
  625      * 1. Walk the vif list, matching vifs against this ifnet.
  626      * 2. Walk the multicast forwarding cache (mfc) looking for
  627      *    inner matches with this vif's index.
  628      * 3. Expire any matching multicast forwarding cache entries.
  629      * 4. Free vif state. This should disable ALLMULTI on the interface.
  630      */
  631     for (vifi = 0; vifi < V_numvifs; vifi++) {
  632         if (V_viftable[vifi].v_ifp != ifp)
  633                 continue;
  634         for (i = 0; i < mfchashsize; i++) {
  635                 struct mfc *rt, *nrt;
  636                 for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
  637                         nrt = LIST_NEXT(rt, mfc_hash);
  638                         if (rt->mfc_parent == vifi) {
  639                                 expire_mfc(rt);
  640                         }
  641                 }
  642         }
  643         del_vif_locked(vifi);
  644     }
  645 
  646     MFC_UNLOCK();
  647     VIF_UNLOCK();
  648 
  649     MROUTER_UNLOCK();
  650 }
   651 
  652 /*
  653  * Enable multicast forwarding.
  654  */
  655 static int
  656 ip_mrouter_init(struct socket *so, int version)
  657 {
  658 
  659     CTR3(KTR_IPMF, "%s: so_type %d, pr_protocol %d", __func__,
  660         so->so_type, so->so_proto->pr_protocol);
  661 
  662     if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
  663         return EOPNOTSUPP;
  664 
  665     if (version != 1)
  666         return ENOPROTOOPT;
  667 
  668     MROUTER_LOCK();
  669 
  670     if (ip_mrouter_unloading) {
  671         MROUTER_UNLOCK();
  672         return ENOPROTOOPT;
  673     }
  674 
  675     if (V_ip_mrouter != NULL) {
  676         MROUTER_UNLOCK();
  677         return EADDRINUSE;
  678     }
  679 
  680     V_mfchashtbl = hashinit_flags(mfchashsize, M_MRTABLE, &V_mfchash,
  681         HASH_NOWAIT);
  682 
  683     callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
  684         curvnet);
  685     callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
  686         curvnet);
  687     callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
  688         curvnet);
  689 
  690     V_ip_mrouter = so;
  691     ip_mrouter_cnt++;
  692 
  693     MROUTER_UNLOCK();
  694 
  695     CTR1(KTR_IPMF, "%s: done", __func__);
  696 
  697     return 0;
  698 }
  699 
  700 /*
  701  * Disable multicast forwarding.
  702  */
  703 static int
  704 X_ip_mrouter_done(void)
  705 {
  706     struct ifnet *ifp;
  707     u_long i;
  708     vifi_t vifi;
  709 
  710     MROUTER_LOCK();
  711 
  712     if (V_ip_mrouter == NULL) {
  713         MROUTER_UNLOCK();
  714         return EINVAL;
  715     }
  716 
  717     /*
   718      * Detach/disable hooks to the rest of the system.
  719      */
  720     V_ip_mrouter = NULL;
  721     ip_mrouter_cnt--;
  722     V_mrt_api_config = 0;
  723 
  724     VIF_LOCK();
  725 
  726     /*
  727      * For each phyint in use, disable promiscuous reception of all IP
  728      * multicasts.
  729      */
  730     for (vifi = 0; vifi < V_numvifs; vifi++) {
  731         if (!in_nullhost(V_viftable[vifi].v_lcl_addr) &&
  732                 !(V_viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
  733             ifp = V_viftable[vifi].v_ifp;
  734             if_allmulti(ifp, 0);
  735         }
  736     }
  737     bzero((caddr_t)V_viftable, sizeof(V_viftable));
  738     V_numvifs = 0;
  739     V_pim_assert_enabled = 0;
   740 
  741     VIF_UNLOCK();
  742 
  743     callout_stop(&V_expire_upcalls_ch);
  744     callout_stop(&V_bw_upcalls_ch);
  745     callout_stop(&V_bw_meter_ch);
  746 
  747     MFC_LOCK();
  748 
  749     /*
  750      * Free all multicast forwarding cache entries.
  751      * Do not use hashdestroy(), as we must perform other cleanup.
  752      */
  753     for (i = 0; i < mfchashsize; i++) {
  754         struct mfc *rt, *nrt;
  755         for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
  756                 nrt = LIST_NEXT(rt, mfc_hash);
  757                 expire_mfc(rt);
  758         }
  759     }
  760     free(V_mfchashtbl, M_MRTABLE);
  761     V_mfchashtbl = NULL;
  762 
  763     bzero(V_nexpire, sizeof(V_nexpire[0]) * mfchashsize);
  764 
  765     V_bw_upcalls_n = 0;
  766     bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
  767 
  768     MFC_UNLOCK();
  769 
  770     V_reg_vif_num = VIFI_INVALID;
  771 
  772     MROUTER_UNLOCK();
  773 
  774     CTR1(KTR_IPMF, "%s: done", __func__);
  775 
  776     return 0;
  777 }
  778 
  779 /*
  780  * Set PIM assert processing global
  781  */
  782 static int
  783 set_assert(int i)
  784 {
  785     if ((i != 1) && (i != 0))
  786         return EINVAL;
  787 
  788     V_pim_assert_enabled = i;
  789 
  790     return 0;
  791 }
  792 
  793 /*
  794  * Configure API capabilities
  795  */
  796 int
  797 set_api_config(uint32_t *apival)
  798 {
  799     u_long i;
  800 
  801     /*
  802      * We can set the API capabilities only if it is the first operation
  803      * after MRT_INIT. I.e.:
  804      *  - there are no vifs installed
  805      *  - pim_assert is not enabled
  806      *  - the MFC table is empty
  807      */
  808     if (V_numvifs > 0) {
  809         *apival = 0;
  810         return EPERM;
  811     }
  812     if (V_pim_assert_enabled) {
  813         *apival = 0;
  814         return EPERM;
  815     }
  816 
  817     MFC_LOCK();
  818 
  819     for (i = 0; i < mfchashsize; i++) {
  820         if (LIST_FIRST(&V_mfchashtbl[i]) != NULL) {
  821             MFC_UNLOCK();
  822             *apival = 0;
  823             return EPERM;
  824         }
  825     }
  826 
  827     MFC_UNLOCK();
  828 
  829     V_mrt_api_config = *apival & mrt_api_support;
  830     *apival = V_mrt_api_config;
  831 
  832     return 0;
  833 }
  834 
  835 /*
  836  * Add a vif to the vif table
  837  */
  838 static int
  839 add_vif(struct vifctl *vifcp)
  840 {
  841     struct vif *vifp = V_viftable + vifcp->vifc_vifi;
  842     struct sockaddr_in sin = {sizeof sin, AF_INET};
  843     struct ifaddr *ifa;
  844     struct ifnet *ifp;
  845     int error;
  846 
  847     VIF_LOCK();
  848     if (vifcp->vifc_vifi >= MAXVIFS) {
  849         VIF_UNLOCK();
  850         return EINVAL;
  851     }
  852     /* rate limiting is no longer supported by this code */
  853     if (vifcp->vifc_rate_limit != 0) {
  854         log(LOG_ERR, "rate limiting is no longer supported\n");
  855         VIF_UNLOCK();
  856         return EINVAL;
  857     }
  858     if (!in_nullhost(vifp->v_lcl_addr)) {
  859         VIF_UNLOCK();
  860         return EADDRINUSE;
  861     }
  862     if (in_nullhost(vifcp->vifc_lcl_addr)) {
  863         VIF_UNLOCK();
  864         return EADDRNOTAVAIL;
  865     }
  866 
  867     /* Find the interface with an address in AF_INET family */
  868     if (vifcp->vifc_flags & VIFF_REGISTER) {
  869         /*
  870          * XXX: Because VIFF_REGISTER does not really need a valid
  871          * local interface (e.g. it could be 127.0.0.2), we don't
  872          * check its address.
  873          */
  874         ifp = NULL;
  875     } else {
  876         sin.sin_addr = vifcp->vifc_lcl_addr;
  877         ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
  878         if (ifa == NULL) {
  879             VIF_UNLOCK();
  880             return EADDRNOTAVAIL;
  881         }
  882         ifp = ifa->ifa_ifp;
  883         ifa_free(ifa);
  884     }
  885 
  886     if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) {
  887         CTR1(KTR_IPMF, "%s: tunnels are no longer supported", __func__);
  888         VIF_UNLOCK();
  889         return EOPNOTSUPP;
  890     } else if (vifcp->vifc_flags & VIFF_REGISTER) {
  891         ifp = &V_multicast_register_if;
  892         CTR2(KTR_IPMF, "%s: add register vif for ifp %p", __func__, ifp);
  893         if (V_reg_vif_num == VIFI_INVALID) {
  894             if_initname(&V_multicast_register_if, "register_vif", 0);
  895             V_multicast_register_if.if_flags = IFF_LOOPBACK;
  896             V_reg_vif_num = vifcp->vifc_vifi;
  897         }
  898     } else {            /* Make sure the interface supports multicast */
  899         if ((ifp->if_flags & IFF_MULTICAST) == 0) {
  900             VIF_UNLOCK();
  901             return EOPNOTSUPP;
  902         }
  903 
  904         /* Enable promiscuous reception of all IP multicasts from the if */
  905         error = if_allmulti(ifp, 1);
  906         if (error) {
  907             VIF_UNLOCK();
  908             return error;
  909         }
  910     }
  911 
  912     vifp->v_flags     = vifcp->vifc_flags;
  913     vifp->v_threshold = vifcp->vifc_threshold;
  914     vifp->v_lcl_addr  = vifcp->vifc_lcl_addr;
  915     vifp->v_rmt_addr  = vifcp->vifc_rmt_addr;
  916     vifp->v_ifp       = ifp;
  917     /* initialize per vif pkt counters */
  918     vifp->v_pkt_in    = 0;
  919     vifp->v_pkt_out   = 0;
  920     vifp->v_bytes_in  = 0;
  921     vifp->v_bytes_out = 0;
  922 
  923     /* Adjust numvifs up if the vifi is higher than numvifs */
  924     if (V_numvifs <= vifcp->vifc_vifi)
  925         V_numvifs = vifcp->vifc_vifi + 1;
  926 
  927     VIF_UNLOCK();
  928 
  929     CTR4(KTR_IPMF, "%s: add vif %d laddr %s thresh %x", __func__,
  930         (int)vifcp->vifc_vifi, inet_ntoa(vifcp->vifc_lcl_addr),
  931         (int)vifcp->vifc_threshold);
  932 
  933     return 0;
  934 }
  935 
  936 /*
  937  * Delete a vif from the vif table
  938  */
  939 static int
  940 del_vif_locked(vifi_t vifi)
  941 {
  942     struct vif *vifp;
  943 
  944     VIF_LOCK_ASSERT();
  945 
  946     if (vifi >= V_numvifs) {
  947         return EINVAL;
  948     }
  949     vifp = &V_viftable[vifi];
  950     if (in_nullhost(vifp->v_lcl_addr)) {
  951         return EADDRNOTAVAIL;
  952     }
  953 
  954     if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
  955         if_allmulti(vifp->v_ifp, 0);
  956 
  957     if (vifp->v_flags & VIFF_REGISTER)
  958         V_reg_vif_num = VIFI_INVALID;
  959 
  960     bzero((caddr_t)vifp, sizeof (*vifp));
  961 
  962     CTR2(KTR_IPMF, "%s: delete vif %d", __func__, (int)vifi);
  963 
  964     /* Adjust numvifs down */
  965     for (vifi = V_numvifs; vifi > 0; vifi--)
  966         if (!in_nullhost(V_viftable[vifi-1].v_lcl_addr))
  967             break;
  968     V_numvifs = vifi;
  969 
  970     return 0;
  971 }
  972 
  973 static int
  974 del_vif(vifi_t vifi)
  975 {
  976     int cc;
  977 
  978     VIF_LOCK();
  979     cc = del_vif_locked(vifi);
  980     VIF_UNLOCK();
  981 
  982     return cc;
  983 }
  984 
  985 /*
  986  * update an mfc entry without resetting counters and S,G addresses.
  987  */
  988 static void
  989 update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
  990 {
  991     int i;
  992 
  993     rt->mfc_parent = mfccp->mfcc_parent;
  994     for (i = 0; i < V_numvifs; i++) {
  995         rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
  996         rt->mfc_flags[i] = mfccp->mfcc_flags[i] & V_mrt_api_config &
  997             MRT_MFC_FLAGS_ALL;
  998     }
  999     /* set the RP address */
 1000     if (V_mrt_api_config & MRT_MFC_RP)
 1001         rt->mfc_rp = mfccp->mfcc_rp;
 1002     else
 1003         rt->mfc_rp.s_addr = INADDR_ANY;
 1004 }
 1005 
 1006 /*
 1007  * fully initialize an mfc entry from the parameter.
 1008  */
 1009 static void
 1010 init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
 1011 {
 1012     rt->mfc_origin     = mfccp->mfcc_origin;
 1013     rt->mfc_mcastgrp   = mfccp->mfcc_mcastgrp;
 1014 
 1015     update_mfc_params(rt, mfccp);
 1016 
 1017     /* initialize pkt counters per src-grp */
 1018     rt->mfc_pkt_cnt    = 0;
 1019     rt->mfc_byte_cnt   = 0;
 1020     rt->mfc_wrong_if   = 0;
 1021     timevalclear(&rt->mfc_last_assert);
 1022 }
 1023 
 1024 static void
 1025 expire_mfc(struct mfc *rt)
 1026 {
 1027         struct rtdetq *rte, *nrte;
 1028 
 1029         MFC_LOCK_ASSERT();
 1030 
 1031         free_bw_list(rt->mfc_bw_meter);
 1032 
 1033         TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
 1034                 m_freem(rte->m);
 1035                 TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
 1036                 free(rte, M_MRTABLE);
 1037         }
 1038 
 1039         LIST_REMOVE(rt, mfc_hash);
 1040         free(rt, M_MRTABLE);
 1041 }
 1042 
 1043 /*
 1044  * Add an mfc entry
 1045  */
 1046 static int
 1047 add_mfc(struct mfcctl2 *mfccp)
 1048 {
 1049     struct mfc *rt;
 1050     struct rtdetq *rte, *nrte;
 1051     u_long hash = 0;
 1052     u_short nstl;
 1053 
 1054     VIF_LOCK();
 1055     MFC_LOCK();
 1056 
 1057     rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
 1058 
 1059     /* If an entry already exists, just update the fields */
 1060     if (rt) {
 1061         CTR4(KTR_IPMF, "%s: update mfc orig %s group %lx parent %x",
 1062             __func__, inet_ntoa(mfccp->mfcc_origin),
 1063             (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
 1064             mfccp->mfcc_parent);
 1065         update_mfc_params(rt, mfccp);
 1066         MFC_UNLOCK();
 1067         VIF_UNLOCK();
 1068         return (0);
 1069     }
 1070 
 1071     /*
  1072      * Find the entry for which the upcall was made and update it.
 1073      */
 1074     nstl = 0;
 1075     hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
 1076     LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1077         if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
 1078             in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
 1079             !TAILQ_EMPTY(&rt->mfc_stall)) {
 1080                 CTR5(KTR_IPMF,
 1081                     "%s: add mfc orig %s group %lx parent %x qh %p",
 1082                     __func__, inet_ntoa(mfccp->mfcc_origin),
 1083                     (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
 1084                     mfccp->mfcc_parent,
 1085                     TAILQ_FIRST(&rt->mfc_stall));
 1086                 if (nstl++)
 1087                         CTR1(KTR_IPMF, "%s: multiple matches", __func__);
 1088 
 1089                 init_mfc_params(rt, mfccp);
 1090                 rt->mfc_expire = 0;     /* Don't clean this guy up */
 1091                 V_nexpire[hash]--;
 1092 
 1093                 /* Free queued packets, but attempt to forward them first. */
 1094                 TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
 1095                         if (rte->ifp != NULL)
 1096                                 ip_mdq(rte->m, rte->ifp, rt, -1);
 1097                         m_freem(rte->m);
 1098                         TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
 1099                         rt->mfc_nstall--;
 1100                         free(rte, M_MRTABLE);
 1101                 }
 1102         }
 1103     }
 1104 
 1105     /*
 1106      * It is possible that an entry is being inserted without an upcall
 1107      */
 1108     if (nstl == 0) {
 1109         CTR1(KTR_IPMF, "%s: adding mfc w/o upcall", __func__);
 1110         LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1111                 if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
 1112                     in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
 1113                         init_mfc_params(rt, mfccp);
 1114                         if (rt->mfc_expire)
 1115                             V_nexpire[hash]--;
 1116                         rt->mfc_expire = 0;
 1117                         break; /* XXX */
 1118                 }
 1119         }
 1120 
 1121         if (rt == NULL) {               /* no upcall, so make a new entry */
 1122             rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
 1123             if (rt == NULL) {
 1124                 MFC_UNLOCK();
 1125                 VIF_UNLOCK();
 1126                 return (ENOBUFS);
 1127             }
 1128 
 1129             init_mfc_params(rt, mfccp);
 1130             TAILQ_INIT(&rt->mfc_stall);
 1131             rt->mfc_nstall = 0;
 1132 
 1133             rt->mfc_expire     = 0;
 1134             rt->mfc_bw_meter = NULL;
 1135 
 1136             /* insert new entry at head of hash chain */
 1137             LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
 1138         }
 1139     }
 1140 
 1141     MFC_UNLOCK();
 1142     VIF_UNLOCK();
 1143 
 1144     return (0);
 1145 }
 1146 
 1147 /*
 1148  * Delete an mfc entry
 1149  */
 1150 static int
 1151 del_mfc(struct mfcctl2 *mfccp)
 1152 {
 1153     struct in_addr      origin;
 1154     struct in_addr      mcastgrp;
 1155     struct mfc          *rt;
 1156 
 1157     origin = mfccp->mfcc_origin;
 1158     mcastgrp = mfccp->mfcc_mcastgrp;
 1159 
 1160     CTR3(KTR_IPMF, "%s: delete mfc orig %s group %lx", __func__,
 1161         inet_ntoa(origin), (u_long)ntohl(mcastgrp.s_addr));
 1162 
 1163     MFC_LOCK();
 1164 
 1165     rt = mfc_find(&origin, &mcastgrp);
 1166     if (rt == NULL) {
 1167         MFC_UNLOCK();
 1168         return EADDRNOTAVAIL;
 1169     }
 1170 
 1171     /*
 1172      * free the bw_meter entries
 1173      */
 1174     free_bw_list(rt->mfc_bw_meter);
 1175     rt->mfc_bw_meter = NULL;
 1176 
 1177     LIST_REMOVE(rt, mfc_hash);
 1178     free(rt, M_MRTABLE);
 1179 
 1180     MFC_UNLOCK();
 1181 
 1182     return (0);
 1183 }
 1184 
 1185 /*
 1186  * Send a message to the routing daemon on the multicast routing socket.
 1187  */
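       /*
        * Editor's note: the daemon reads these upcalls from the same raw
        * IGMP socket that issued MRT_INIT; each message starts with a
        * struct igmpmsg whose im_msgtype is e.g. IGMPMSG_NOCACHE or
        * IGMPMSG_WRONGVIF (see X_ip_mforward() and ip_mdq() below), with
        * im_vif identifying the incoming vif and im_src the packet source.
        */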
 1188 static int
 1189 socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
 1190 {
 1191     if (s) {
 1192         SOCKBUF_LOCK(&s->so_rcv);
 1193         if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
 1194             NULL) != 0) {
 1195             sorwakeup_locked(s);
 1196             return 0;
 1197         }
 1198         SOCKBUF_UNLOCK(&s->so_rcv);
 1199     }
 1200     m_freem(mm);
 1201     return -1;
 1202 }
 1203 
 1204 /*
 1205  * IP multicast forwarding function. This function assumes that the packet
 1206  * pointed to by "ip" has arrived on (or is about to be sent to) the interface
 1207  * pointed to by "ifp", and the packet is to be relayed to other networks
 1208  * that have members of the packet's destination IP multicast group.
 1209  *
 1210  * The packet is returned unscathed to the caller, unless it is
 1211  * erroneous, in which case a non-zero return value tells the caller to
 1212  * discard it.
 1213  */
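       /*
        * Editor's note: the caller (the ip_input()/ip_output() paths, via
        * the ip_mforward hook) frees the mbuf itself on a non-zero return;
        * the packet itself is never consumed here, as copies are made for
        * queueing and transmission.
        */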
 1214 
 1215 #define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */
 1216 
 1217 static int
 1218 X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
 1219     struct ip_moptions *imo)
 1220 {
 1221     struct mfc *rt;
 1222     int error;
 1223     vifi_t vifi;
 1224 
  1225     CTR3(KTR_IPMF, "ip_mforward: fwd pkt orig %s group %lx ifp %p",
 1226         inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr), ifp);
 1227 
 1228     if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
 1229                 ((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
 1230         /*
 1231          * Packet arrived via a physical interface or
 1232          * an encapsulated tunnel or a register_vif.
 1233          */
 1234     } else {
 1235         /*
 1236          * Packet arrived through a source-route tunnel.
 1237          * Source-route tunnels are no longer supported.
 1238          */
 1239         return (1);
 1240     }
 1241 
 1242     VIF_LOCK();
 1243     MFC_LOCK();
 1244     if (imo && ((vifi = imo->imo_multicast_vif) < V_numvifs)) {
 1245         if (ip->ip_ttl < MAXTTL)
 1246             ip->ip_ttl++;       /* compensate for -1 in *_send routines */
 1247         error = ip_mdq(m, ifp, NULL, vifi);
 1248         MFC_UNLOCK();
 1249         VIF_UNLOCK();
 1250         return error;
 1251     }
 1252 
 1253     /*
 1254      * Don't forward a packet with time-to-live of zero or one,
 1255      * or a packet destined to a local-only group.
 1256      */
 1257     if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ntohl(ip->ip_dst.s_addr))) {
 1258         MFC_UNLOCK();
 1259         VIF_UNLOCK();
 1260         return 0;
 1261     }
 1262 
 1263     /*
 1264      * Determine forwarding vifs from the forwarding cache table
 1265      */
 1266     MRTSTAT_INC(mrts_mfc_lookups);
 1267     rt = mfc_find(&ip->ip_src, &ip->ip_dst);
 1268 
 1269     /* Entry exists, so forward if necessary */
 1270     if (rt != NULL) {
 1271         error = ip_mdq(m, ifp, rt, -1);
 1272         MFC_UNLOCK();
 1273         VIF_UNLOCK();
 1274         return error;
 1275     } else {
 1276         /*
  1277          * If we don't have a route for the packet's origin,
  1278          * make a copy of the packet and send a message to the routing daemon.
 1279          */
 1280 
 1281         struct mbuf *mb0;
 1282         struct rtdetq *rte;
 1283         u_long hash;
 1284         int hlen = ip->ip_hl << 2;
 1285 
 1286         MRTSTAT_INC(mrts_mfc_misses);
 1287         MRTSTAT_INC(mrts_no_route);
 1288         CTR2(KTR_IPMF, "ip_mforward: no mfc for (%s,%lx)",
 1289             inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr));
 1290 
 1291         /*
 1292          * Allocate mbufs early so that we don't do extra work if we are
 1293          * just going to fail anyway.  Make sure to pullup the header so
 1294          * that other people can't step on it.
 1295          */
 1296         rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE,
 1297             M_NOWAIT|M_ZERO);
 1298         if (rte == NULL) {
 1299             MFC_UNLOCK();
 1300             VIF_UNLOCK();
 1301             return ENOBUFS;
 1302         }
 1303 
 1304         mb0 = m_copypacket(m, M_DONTWAIT);
 1305         if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
 1306             mb0 = m_pullup(mb0, hlen);
 1307         if (mb0 == NULL) {
 1308             free(rte, M_MRTABLE);
 1309             MFC_UNLOCK();
 1310             VIF_UNLOCK();
 1311             return ENOBUFS;
 1312         }
 1313 
 1314         /* is there an upcall waiting for this flow ? */
 1315         hash = MFCHASH(ip->ip_src, ip->ip_dst);
 1316         LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1317                 if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
 1318                     in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
 1319                     !TAILQ_EMPTY(&rt->mfc_stall))
 1320                         break;
 1321         }
 1322 
 1323         if (rt == NULL) {
 1324             int i;
 1325             struct igmpmsg *im;
 1326             struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 1327             struct mbuf *mm;
 1328 
 1329             /*
 1330              * Locate the vifi for the incoming interface for this packet.
 1331              * If none found, drop packet.
 1332              */
 1333             for (vifi = 0; vifi < V_numvifs &&
 1334                     V_viftable[vifi].v_ifp != ifp; vifi++)
 1335                 ;
 1336             if (vifi >= V_numvifs)      /* vif not found, drop packet */
 1337                 goto non_fatal;
 1338 
 1339             /* no upcall, so make a new entry */
 1340             rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
 1341             if (rt == NULL)
 1342                 goto fail;
 1343 
 1344             /* Make a copy of the header to send to the user level process */
 1345             mm = m_copy(mb0, 0, hlen);
 1346             if (mm == NULL)
 1347                 goto fail1;
 1348 
 1349             /*
 1350              * Send message to routing daemon to install
 1351              * a route into the kernel table
 1352              */
 1353 
 1354             im = mtod(mm, struct igmpmsg *);
 1355             im->im_msgtype = IGMPMSG_NOCACHE;
 1356             im->im_mbz = 0;
 1357             im->im_vif = vifi;
 1358 
 1359             MRTSTAT_INC(mrts_upcalls);
 1360 
 1361             k_igmpsrc.sin_addr = ip->ip_src;
 1362             if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
 1363                 CTR0(KTR_IPMF, "ip_mforward: socket queue full");
 1364                 MRTSTAT_INC(mrts_upq_sockfull);
 1365 fail1:
 1366                 free(rt, M_MRTABLE);
 1367 fail:
 1368                 free(rte, M_MRTABLE);
 1369                 m_freem(mb0);
 1370                 MFC_UNLOCK();
 1371                 VIF_UNLOCK();
 1372                 return ENOBUFS;
 1373             }
 1374 
 1375             /* insert new entry at head of hash chain */
 1376             rt->mfc_origin.s_addr     = ip->ip_src.s_addr;
 1377             rt->mfc_mcastgrp.s_addr   = ip->ip_dst.s_addr;
 1378             rt->mfc_expire            = UPCALL_EXPIRE;
 1379             V_nexpire[hash]++;
 1380             for (i = 0; i < V_numvifs; i++) {
 1381                 rt->mfc_ttls[i] = 0;
 1382                 rt->mfc_flags[i] = 0;
 1383             }
 1384             rt->mfc_parent = -1;
 1385 
 1386             /* clear the RP address */
 1387             rt->mfc_rp.s_addr = INADDR_ANY;
 1388             rt->mfc_bw_meter = NULL;
 1389 
 1390             /* initialize pkt counters per src-grp */
 1391             rt->mfc_pkt_cnt = 0;
 1392             rt->mfc_byte_cnt = 0;
 1393             rt->mfc_wrong_if = 0;
 1394             timevalclear(&rt->mfc_last_assert);
 1395 
 1396             TAILQ_INIT(&rt->mfc_stall);
 1397             rt->mfc_nstall = 0;
 1398 
 1399             /* link into table */
 1400             LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
 1401             TAILQ_INSERT_HEAD(&rt->mfc_stall, rte, rte_link);
 1402             rt->mfc_nstall++;
 1403 
 1404         } else {
 1405             /* determine if queue has overflowed */
 1406             if (rt->mfc_nstall > MAX_UPQ) {
 1407                 MRTSTAT_INC(mrts_upq_ovflw);
 1408 non_fatal:
 1409                 free(rte, M_MRTABLE);
 1410                 m_freem(mb0);
 1411                 MFC_UNLOCK();
 1412                 VIF_UNLOCK();
 1413                 return (0);
 1414             }
 1415             TAILQ_INSERT_TAIL(&rt->mfc_stall, rte, rte_link);
 1416             rt->mfc_nstall++;
 1417         }
 1418 
 1419         rte->m                  = mb0;
 1420         rte->ifp                = ifp;
 1421 
 1422         MFC_UNLOCK();
 1423         VIF_UNLOCK();
 1424 
 1425         return 0;
 1426     }
 1427 }
 1428 
 1429 /*
 1430  * Clean up the cache entry if upcall is not serviced
 1431  */
 1432 static void
 1433 expire_upcalls(void *arg)
 1434 {
 1435     u_long i;
 1436 
 1437     CURVNET_SET((struct vnet *) arg);
 1438 
 1439     MFC_LOCK();
 1440 
 1441     for (i = 0; i < mfchashsize; i++) {
 1442         struct mfc *rt, *nrt;
 1443 
 1444         if (V_nexpire[i] == 0)
 1445             continue;
 1446 
 1447         for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
 1448                 nrt = LIST_NEXT(rt, mfc_hash);
 1449 
 1450                 if (TAILQ_EMPTY(&rt->mfc_stall))
 1451                         continue;
 1452 
 1453                 if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
 1454                         continue;
 1455 
 1456                 /*
 1457                  * free the bw_meter entries
 1458                  */
 1459                 while (rt->mfc_bw_meter != NULL) {
 1460                     struct bw_meter *x = rt->mfc_bw_meter;
 1461 
 1462                     rt->mfc_bw_meter = x->bm_mfc_next;
 1463                     free(x, M_BWMETER);
 1464                 }
 1465 
 1466                 MRTSTAT_INC(mrts_cache_cleanups);
 1467                 CTR3(KTR_IPMF, "%s: expire (%lx, %lx)", __func__,
 1468                     (u_long)ntohl(rt->mfc_origin.s_addr),
 1469                     (u_long)ntohl(rt->mfc_mcastgrp.s_addr));
 1470 
 1471                 expire_mfc(rt);
 1472             }
 1473     }
 1474 
 1475     MFC_UNLOCK();
 1476 
 1477     callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
 1478         curvnet);
 1479 
 1480     CURVNET_RESTORE();
 1481 }
 1482 
 1483 /*
 1484  * Packet forwarding routine once entry in the cache is made
 1485  */
 1486 static int
 1487 ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
 1488 {
 1489     struct ip  *ip = mtod(m, struct ip *);
 1490     vifi_t vifi;
 1491     int plen = ip->ip_len;
 1492 
 1493     VIF_LOCK_ASSERT();
 1494 
 1495     /*
 1496      * If xmt_vif is not -1, send on only the requested vif.
 1497      *
 1498      * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
 1499      */
 1500     if (xmt_vif < V_numvifs) {
 1501         if (V_viftable[xmt_vif].v_flags & VIFF_REGISTER)
 1502                 pim_register_send(ip, V_viftable + xmt_vif, m, rt);
 1503         else
 1504                 phyint_send(ip, V_viftable + xmt_vif, m);
 1505         return 1;
 1506     }
 1507 
 1508     /*
 1509      * Don't forward if it didn't arrive from the parent vif for its origin.
 1510      */
 1511     vifi = rt->mfc_parent;
 1512     if ((vifi >= V_numvifs) || (V_viftable[vifi].v_ifp != ifp)) {
 1513         CTR4(KTR_IPMF, "%s: rx on wrong ifp %p (vifi %d, v_ifp %p)",
 1514             __func__, ifp, (int)vifi, V_viftable[vifi].v_ifp);
 1515         MRTSTAT_INC(mrts_wrong_if);
 1516         ++rt->mfc_wrong_if;
 1517         /*
 1518          * If we are doing PIM assert processing, send a message
 1519          * to the routing daemon.
 1520          *
 1521          * XXX: A PIM-SM router needs the WRONGVIF detection so it
 1522          * can complete the SPT switch, regardless of the type
 1523          * of the iif (broadcast media, GRE tunnel, etc).
 1524          */
 1525         if (V_pim_assert_enabled && (vifi < V_numvifs) &&
 1526             V_viftable[vifi].v_ifp) {
 1527 
 1528             if (ifp == &V_multicast_register_if)
 1529                 PIMSTAT_INC(pims_rcv_registers_wrongiif);
 1530 
 1531             /* Get vifi for the incoming packet */
 1532             for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp;
 1533                 vifi++)
 1534                 ;
 1535             if (vifi >= V_numvifs)
 1536                 return 0;       /* The iif is not found: ignore the packet. */
 1537 
 1538             if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
 1539                 return 0;       /* WRONGVIF disabled: ignore the packet */
 1540 
 1541             if (ratecheck(&rt->mfc_last_assert, &pim_assert_interval)) {
 1542                 struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 1543                 struct igmpmsg *im;
 1544                 int hlen = ip->ip_hl << 2;
 1545                 struct mbuf *mm = m_copy(m, 0, hlen);
 1546 
 1547                 if (mm && (M_HASCL(mm) || mm->m_len < hlen))
 1548                     mm = m_pullup(mm, hlen);
 1549                 if (mm == NULL)
 1550                     return ENOBUFS;
 1551 
 1552                 im = mtod(mm, struct igmpmsg *);
 1553                 im->im_msgtype  = IGMPMSG_WRONGVIF;
 1554                 im->im_mbz              = 0;
 1555                 im->im_vif              = vifi;
 1556 
 1557                 MRTSTAT_INC(mrts_upcalls);
 1558 
 1559                 k_igmpsrc.sin_addr = im->im_src;
 1560                 if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
 1561                     CTR1(KTR_IPMF, "%s: socket queue full", __func__);
 1562                     MRTSTAT_INC(mrts_upq_sockfull);
 1563                     return ENOBUFS;
 1564                 }
 1565             }
 1566         }
 1567         return 0;
 1568     }
 1569 
 1570 
 1571     /* If I sourced this packet, it counts as output, else it was input. */
 1572     if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) {
 1573         V_viftable[vifi].v_pkt_out++;
 1574         V_viftable[vifi].v_bytes_out += plen;
 1575     } else {
 1576         V_viftable[vifi].v_pkt_in++;
 1577         V_viftable[vifi].v_bytes_in += plen;
 1578     }
 1579     rt->mfc_pkt_cnt++;
 1580     rt->mfc_byte_cnt += plen;
 1581 
 1582     /*
 1583      * For each vif, decide if a copy of the packet should be forwarded.
 1584      * Forward if:
 1585      *          - the ttl exceeds the vif's threshold
 1586      *          - there are group members downstream on the interface
 1587      */
 1588     for (vifi = 0; vifi < V_numvifs; vifi++)
 1589         if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
 1590             V_viftable[vifi].v_pkt_out++;
 1591             V_viftable[vifi].v_bytes_out += plen;
 1592             if (V_viftable[vifi].v_flags & VIFF_REGISTER)
 1593                 pim_register_send(ip, V_viftable + vifi, m, rt);
 1594             else
 1595                 phyint_send(ip, V_viftable + vifi, m);
 1596         }
 1597 
 1598     /*
 1599      * Perform upcall-related bw measuring.
 1600      */
 1601     if (rt->mfc_bw_meter != NULL) {
 1602         struct bw_meter *x;
 1603         struct timeval now;
 1604 
 1605         microtime(&now);
 1606         MFC_LOCK_ASSERT();
 1607         for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
 1608             bw_meter_receive_packet(x, plen, &now);
 1609     }
 1610 
 1611     return 0;
 1612 }
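
/*
 * A minimal sketch, not taken from the original file (the helper name is
 * hypothetical): it restates the per-vif forwarding test used in the loop
 * above.  A vif receives a copy only when the MFC entry reports members
 * downstream on it (a nonzero ttl threshold) and the packet's TTL is
 * strictly greater than that threshold, the traditional TTL-scoping
 * mechanism.
 */
static __inline int
ip_mdq_would_forward(const struct mfc *rt, const struct ip *ip, vifi_t vifi)
{
    /* A threshold of 0 means no group members downstream on this vif. */
    return (rt->mfc_ttls[vifi] > 0 && ip->ip_ttl > rt->mfc_ttls[vifi]);
}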
 1613 
 1614 /*
 1615  * Check if a vif number is legal/ok. This is used by in_mcast.c.
 1616  */
 1617 static int
 1618 X_legal_vif_num(int vif)
 1619 {
 1620         int ret;
 1621 
 1622         ret = 0;
 1623         if (vif < 0)
 1624                 return (ret);
 1625 
 1626         VIF_LOCK();
 1627         if (vif < V_numvifs)
 1628                 ret = 1;
 1629         VIF_UNLOCK();
 1630 
 1631         return (ret);
 1632 }
 1633 
 1634 /*
 1635  * Return the local address used by this vif
 1636  */
 1637 static u_long
 1638 X_ip_mcast_src(int vifi)
 1639 {
 1640         in_addr_t addr;
 1641 
 1642         addr = INADDR_ANY;
 1643         if (vifi < 0)
 1644                 return (addr);
 1645 
 1646         VIF_LOCK();
 1647         if (vifi < V_numvifs)
 1648                 addr = V_viftable[vifi].v_lcl_addr.s_addr;
 1649         VIF_UNLOCK();
 1650 
 1651         return (addr);
 1652 }
 1653 
 1654 static void
 1655 phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
 1656 {
 1657     struct mbuf *mb_copy;
 1658     int hlen = ip->ip_hl << 2;
 1659 
 1660     VIF_LOCK_ASSERT();
 1661 
 1662     /*
 1663      * Make a new reference to the packet; make sure that
 1664      * the IP header is actually copied, not just referenced,
 1665      * so that ip_output() only scribbles on the copy.
 1666      */
 1667     mb_copy = m_copypacket(m, M_DONTWAIT);
 1668     if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
 1669         mb_copy = m_pullup(mb_copy, hlen);
 1670     if (mb_copy == NULL)
 1671         return;
 1672 
 1673     send_packet(vifp, mb_copy);
 1674 }
 1675 
 1676 static void
 1677 send_packet(struct vif *vifp, struct mbuf *m)
 1678 {
 1679         struct ip_moptions imo;
 1680         struct in_multi *imm[2];
 1681         int error;
 1682 
 1683         VIF_LOCK_ASSERT();
 1684 
 1685         imo.imo_multicast_ifp  = vifp->v_ifp;
 1686         imo.imo_multicast_ttl  = mtod(m, struct ip *)->ip_ttl - 1;
 1687         imo.imo_multicast_loop = 1;
 1688         imo.imo_multicast_vif  = -1;
 1689         imo.imo_num_memberships = 0;
 1690         imo.imo_max_memberships = 2;
 1691         imo.imo_membership  = &imm[0];
 1692 
 1693         /*
 1694          * Re-entrancy should not be a problem here, because
 1695          * the packets that we send out and that are looped back to us
 1696          * should get rejected because they appear to come from
 1697          * the loopback interface, thus preventing looping.
 1698          */
 1699         error = ip_output(m, NULL, NULL, IP_FORWARDING, &imo, NULL);
 1700         CTR3(KTR_IPMF, "%s: vif %td err %d", __func__,
 1701             (ptrdiff_t)(vifp - V_viftable), error);
 1702 }
 1703 
 1704 /*
 1705  * Stubs for old RSVP socket shim implementation.
 1706  */
 1707 
 1708 static int
 1709 X_ip_rsvp_vif(struct socket *so __unused, struct sockopt *sopt __unused)
 1710 {
 1711 
 1712         return (EOPNOTSUPP);
 1713 }
 1714 
 1715 static void
 1716 X_ip_rsvp_force_done(struct socket *so __unused)
 1717 {
 1718 
 1719 }
 1720 
 1721 static void
 1722 X_rsvp_input(struct mbuf *m, int off __unused)
 1723 {
 1724 
 1725         if (!V_rsvp_on)
 1726                 m_freem(m);
 1727 }
 1728 
 1729 /*
 1730  * Code for bandwidth monitors
 1731  */
 1732 
 1733 /*
 1734  * Define common interface for timeval-related methods
 1735  */
 1736 #define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
 1737 #define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
 1738 #define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))
 1739 
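/*
 * For example, the threshold-interval validation performed in
 * add_bw_upcall() below reduces to a wrapped timevalcmp() against the
 * configured minimum (a sketch mirroring the existing check, not new
 * behaviour):
 *
 *	struct timeval min = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
 *	    BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
 *
 *	if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &min, <))
 *		return EINVAL;
 */
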
 1740 static uint32_t
 1741 compute_bw_meter_flags(struct bw_upcall *req)
 1742 {
 1743     uint32_t flags = 0;
 1744 
 1745     if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
 1746         flags |= BW_METER_UNIT_PACKETS;
 1747     if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
 1748         flags |= BW_METER_UNIT_BYTES;
 1749     if (req->bu_flags & BW_UPCALL_GEQ)
 1750         flags |= BW_METER_GEQ;
 1751     if (req->bu_flags & BW_UPCALL_LEQ)
 1752         flags |= BW_METER_LEQ;
 1753 
 1754     return flags;
 1755 }
 1756 
 1757 /*
 1758  * Add a bw_meter entry
 1759  */
 1760 static int
 1761 add_bw_upcall(struct bw_upcall *req)
 1762 {
 1763     struct mfc *mfc;
 1764     struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
 1765                 BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
 1766     struct timeval now;
 1767     struct bw_meter *x;
 1768     uint32_t flags;
 1769 
 1770     if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
 1771         return EOPNOTSUPP;
 1772 
 1773     /* Test if the flags are valid */
 1774     if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
 1775         return EINVAL;
 1776     if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
 1777         return EINVAL;
 1778     if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
 1779             == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
 1780         return EINVAL;
 1781 
 1782     /* Test if the threshold time interval is valid */
 1783     if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
 1784         return EINVAL;
 1785 
 1786     flags = compute_bw_meter_flags(req);
 1787 
 1788     /*
 1789      * Check whether we already have the same bw_meter entry
 1790      */
 1791     MFC_LOCK();
 1792     mfc = mfc_find(&req->bu_src, &req->bu_dst);
 1793     if (mfc == NULL) {
 1794         MFC_UNLOCK();
 1795         return EADDRNOTAVAIL;
 1796     }
 1797     for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
 1798         if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
 1799                            &req->bu_threshold.b_time, ==)) &&
 1800             (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
 1801             (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
 1802             (x->bm_flags & BW_METER_USER_FLAGS) == flags)  {
 1803             MFC_UNLOCK();
 1804             return 0;           /* XXX Already installed */
 1805         }
 1806     }
 1807 
 1808     /* Allocate the new bw_meter entry */
 1809     x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
 1810     if (x == NULL) {
 1811         MFC_UNLOCK();
 1812         return ENOBUFS;
 1813     }
 1814 
 1815     /* Set the new bw_meter entry */
 1816     x->bm_threshold.b_time = req->bu_threshold.b_time;
 1817     microtime(&now);
 1818     x->bm_start_time = now;
 1819     x->bm_threshold.b_packets = req->bu_threshold.b_packets;
 1820     x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
 1821     x->bm_measured.b_packets = 0;
 1822     x->bm_measured.b_bytes = 0;
 1823     x->bm_flags = flags;
 1824     x->bm_time_next = NULL;
 1825     x->bm_time_hash = BW_METER_BUCKETS;
 1826 
 1827     /* Add the new bw_meter entry to the front of entries for this MFC */
 1828     x->bm_mfc = mfc;
 1829     x->bm_mfc_next = mfc->mfc_bw_meter;
 1830     mfc->mfc_bw_meter = x;
 1831     schedule_bw_meter(x, &now);
 1832     MFC_UNLOCK();
 1833 
 1834     return 0;
 1835 }
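
/*
 * Userland view, as a hedged sketch (the helper name is hypothetical and
 * error handling is elided): a routing daemon that holds the mrouted
 * socket (a raw IPPROTO_IGMP socket on which MRT_INIT succeeded, with
 * MRT_MFC_BW_UPCALL enabled through MRT_API_CONFIG) installs a meter via
 * the MRT_ADD_BW_UPCALL socket option.  The request below asks for an
 * upcall once at least 10000 packets from src to grp are seen within a
 * 5-second interval:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip_mroute.h>
 *	#include <string.h>
 *
 *	static int
 *	monitor_flow(int mrouter_fd, struct in_addr src, struct in_addr grp)
 *	{
 *		struct bw_upcall req;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.bu_src = src;
 *		req.bu_dst = grp;
 *		req.bu_flags = BW_UPCALL_UNIT_PACKETS | BW_UPCALL_GEQ;
 *		req.bu_threshold.b_time.tv_sec = 5;
 *		req.bu_threshold.b_packets = 10000;
 *		return (setsockopt(mrouter_fd, IPPROTO_IP, MRT_ADD_BW_UPCALL,
 *		    &req, sizeof(req)));
 *	}
 *
 * The same request via MRT_DEL_BW_UPCALL (or with BW_UPCALL_DELETE_ALL set
 * in bu_flags) removes meters again; see del_bw_upcall() below.
 */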
 1836 
 1837 static void
 1838 free_bw_list(struct bw_meter *list)
 1839 {
 1840     while (list != NULL) {
 1841         struct bw_meter *x = list;
 1842 
 1843         list = list->bm_mfc_next;
 1844         unschedule_bw_meter(x);
 1845         free(x, M_BWMETER);
 1846     }
 1847 }
 1848 
 1849 /*
 1850  * Delete one or more bw_meter entries
 1851  */
 1852 static int
 1853 del_bw_upcall(struct bw_upcall *req)
 1854 {
 1855     struct mfc *mfc;
 1856     struct bw_meter *x;
 1857 
 1858     if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
 1859         return EOPNOTSUPP;
 1860 
 1861     MFC_LOCK();
 1862 
 1863     /* Find the corresponding MFC entry */
 1864     mfc = mfc_find(&req->bu_src, &req->bu_dst);
 1865     if (mfc == NULL) {
 1866         MFC_UNLOCK();
 1867         return EADDRNOTAVAIL;
 1868     } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
 1869         /*
 1870          * Delete all bw_meter entries for this mfc
 1871          */
 1872         struct bw_meter *list;
 1873 
 1874         list = mfc->mfc_bw_meter;
 1875         mfc->mfc_bw_meter = NULL;
 1876         free_bw_list(list);
 1877         MFC_UNLOCK();
 1878         return 0;
 1879     } else {                    /* Delete a single bw_meter entry */
 1880         struct bw_meter *prev;
 1881         uint32_t flags = 0;
 1882 
 1883         flags = compute_bw_meter_flags(req);
 1884 
 1885         /* Find the bw_meter entry to delete */
 1886         for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
 1887              prev = x, x = x->bm_mfc_next) {
 1888             if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
 1889                                &req->bu_threshold.b_time, ==)) &&
 1890                 (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
 1891                 (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
 1892                 (x->bm_flags & BW_METER_USER_FLAGS) == flags)
 1893                 break;
 1894         }
 1895         if (x != NULL) { /* Delete entry from the list for this MFC */
 1896             if (prev != NULL)
 1897                 prev->bm_mfc_next = x->bm_mfc_next;     /* remove from middle*/
 1898             else
 1899                 x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
 1900 
 1901             unschedule_bw_meter(x);
 1902             MFC_UNLOCK();
 1903             /* Free the bw_meter entry */
 1904             free(x, M_BWMETER);
 1905             return 0;
 1906         } else {
 1907             MFC_UNLOCK();
 1908             return EINVAL;
 1909         }
 1910     }
 1911     /* NOTREACHED */
 1912 }
 1913 
 1914 /*
 1915  * Perform bandwidth measurement processing that may result in an upcall
 1916  */
 1917 static void
 1918 bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
 1919 {
 1920     struct timeval delta;
 1921 
 1922     MFC_LOCK_ASSERT();
 1923 
 1924     delta = *nowp;
 1925     BW_TIMEVALDECR(&delta, &x->bm_start_time);
 1926 
 1927     if (x->bm_flags & BW_METER_GEQ) {
 1928         /*
 1929          * Processing for ">=" type of bw_meter entry
 1930          */
 1931         if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
 1932             /* Reset the bw_meter entry */
 1933             x->bm_start_time = *nowp;
 1934             x->bm_measured.b_packets = 0;
 1935             x->bm_measured.b_bytes = 0;
 1936             x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 1937         }
 1938 
 1939         /* Record that a packet is received */
 1940         x->bm_measured.b_packets++;
 1941         x->bm_measured.b_bytes += plen;
 1942 
 1943         /*
 1944          * Test if we should deliver an upcall
 1945          */
 1946         if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
 1947             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 1948                  (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
 1949                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 1950                  (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
 1951                 /* Prepare an upcall for delivery */
 1952                 bw_meter_prepare_upcall(x, nowp);
 1953                 x->bm_flags |= BW_METER_UPCALL_DELIVERED;
 1954             }
 1955         }
 1956     } else if (x->bm_flags & BW_METER_LEQ) {
 1957         /*
 1958          * Processing for "<=" type of bw_meter entry
 1959          */
 1960         if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
 1961             /*
 1962              * The periodic scan of the multicast forwarding table for
 1963              * "<=" bw_meter entries is running behind schedule, so test
 1964              * now whether we should deliver an upcall.
 1965              */
 1966             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 1967                  (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
 1968                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 1969                  (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
 1970                 /* Prepare an upcall for delivery */
 1971                 bw_meter_prepare_upcall(x, nowp);
 1972             }
 1973             /* Reschedule the bw_meter entry */
 1974             unschedule_bw_meter(x);
 1975             schedule_bw_meter(x, nowp);
 1976         }
 1977 
 1978         /* Record that a packet is received */
 1979         x->bm_measured.b_packets++;
 1980         x->bm_measured.b_bytes += plen;
 1981 
 1982         /*
 1983          * Test if we should restart the measuring interval
 1984          */
 1985         if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
 1986              x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
 1987             (x->bm_flags & BW_METER_UNIT_BYTES &&
 1988              x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
 1989             /* Don't restart the measuring interval */
 1990         } else {
 1991             /* Do restart the measuring interval */
 1992             /*
 1993              * XXX: note that we don't unschedule and schedule, because this
 1994              * might be too much overhead per packet. Instead, when we process
 1995              * all entries for a given timer hash bin, we check whether it is
 1996              * really a timeout. If not, we reschedule at that time.
 1997              */
 1998             x->bm_start_time = *nowp;
 1999             x->bm_measured.b_packets = 0;
 2000             x->bm_measured.b_bytes = 0;
 2001             x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 2002         }
 2003     }
 2004 }
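
/*
 * A worked example of the two meter types handled above (the numbers are
 * illustrative only): for a ">=" meter of 100 packets per 10 seconds, the
 * upcall is prepared on arrival of the 100th packet inside the current
 * 10-second window, and BW_METER_UPCALL_DELIVERED keeps it to at most one
 * upcall per window.  For a "<=" meter with the same threshold the decision
 * is tied to the interval boundary rather than to individual arrivals: the
 * upcall is prepared only when the 10-second interval expires (here, if the
 * periodic scan is running late, otherwise in bw_meter_process()) and the
 * measured count is still at or below 100.
 */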
 2005 
 2006 /*
 2007  * Prepare a bandwidth-related upcall
 2008  */
 2009 static void
 2010 bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
 2011 {
 2012     struct timeval delta;
 2013     struct bw_upcall *u;
 2014 
 2015     MFC_LOCK_ASSERT();
 2016 
 2017     /*
 2018      * Compute the measured time interval
 2019      */
 2020     delta = *nowp;
 2021     BW_TIMEVALDECR(&delta, &x->bm_start_time);
 2022 
 2023     /*
 2024      * If there are too many pending upcalls, deliver them now
 2025      */
 2026     if (V_bw_upcalls_n >= BW_UPCALLS_MAX)
 2027         bw_upcalls_send();
 2028 
 2029     /*
 2030      * Set the bw_upcall entry
 2031      */
 2032     u = &V_bw_upcalls[V_bw_upcalls_n++];
 2033     u->bu_src = x->bm_mfc->mfc_origin;
 2034     u->bu_dst = x->bm_mfc->mfc_mcastgrp;
 2035     u->bu_threshold.b_time = x->bm_threshold.b_time;
 2036     u->bu_threshold.b_packets = x->bm_threshold.b_packets;
 2037     u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
 2038     u->bu_measured.b_time = delta;
 2039     u->bu_measured.b_packets = x->bm_measured.b_packets;
 2040     u->bu_measured.b_bytes = x->bm_measured.b_bytes;
 2041     u->bu_flags = 0;
 2042     if (x->bm_flags & BW_METER_UNIT_PACKETS)
 2043         u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
 2044     if (x->bm_flags & BW_METER_UNIT_BYTES)
 2045         u->bu_flags |= BW_UPCALL_UNIT_BYTES;
 2046     if (x->bm_flags & BW_METER_GEQ)
 2047         u->bu_flags |= BW_UPCALL_GEQ;
 2048     if (x->bm_flags & BW_METER_LEQ)
 2049         u->bu_flags |= BW_UPCALL_LEQ;
 2050 }
 2051 
 2052 /*
 2053  * Send the pending bandwidth-related upcalls
 2054  */
 2055 static void
 2056 bw_upcalls_send(void)
 2057 {
 2058     struct mbuf *m;
 2059     int len = V_bw_upcalls_n * sizeof(V_bw_upcalls[0]);
 2060     struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 2061     static struct igmpmsg igmpmsg = { 0,                /* unused1 */
 2062                                       0,                /* unused2 */
 2063                                       IGMPMSG_BW_UPCALL,/* im_msgtype */
 2064                                       0,                /* im_mbz  */
 2065                                       0,                /* im_vif  */
 2066                                       0,                /* unused3 */
 2067                                       { 0 },            /* im_src  */
 2068                                       { 0 } };          /* im_dst  */
 2069 
 2070     MFC_LOCK_ASSERT();
 2071 
 2072     if (V_bw_upcalls_n == 0)
 2073         return;                 /* No pending upcalls */
 2074 
 2075     V_bw_upcalls_n = 0;
 2076 
 2077     /*
 2078      * Allocate a new mbuf, initialize it with the header and
 2079      * the payload for the pending upcalls.
 2080      */
 2081     MGETHDR(m, M_DONTWAIT, MT_DATA);
 2082     if (m == NULL) {
 2083         log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
 2084         return;
 2085     }
 2086 
 2087     m->m_len = m->m_pkthdr.len = 0;
 2088     m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
 2089     m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&V_bw_upcalls[0]);
 2090 
 2091     /*
 2092      * Send the upcalls
 2093      * XXX do we need to set the address in k_igmpsrc ?
 2094      */
 2095     MRTSTAT_INC(mrts_upcalls);
 2096     if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) {
 2097         log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
 2098         MRTSTAT_INC(mrts_upq_sockfull);
 2099     }
 2100 }
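
/*
 * On the receiving end the daemon sees one datagram per batch: a struct
 * igmpmsg header with im_msgtype == IGMPMSG_BW_UPCALL followed by an
 * array of struct bw_upcall.  A hedged userland sketch of the parse,
 * where handle_bw_upcall() is hypothetical (im_mbz overlays the IP
 * protocol field, so a zero value marks a kernel upcall rather than a
 * looped-back IGMP packet):
 *
 *	u_char buf[65535];
 *	ssize_t n = recv(mrouter_fd, buf, sizeof(buf), 0);
 *	struct igmpmsg *im = (struct igmpmsg *)buf;
 *
 *	if (n >= (ssize_t)sizeof(*im) && im->im_mbz == 0 &&
 *	    im->im_msgtype == IGMPMSG_BW_UPCALL) {
 *		struct bw_upcall *bu = (struct bw_upcall *)(im + 1);
 *		int nentries = (n - sizeof(*im)) / sizeof(*bu);
 *
 *		for (int i = 0; i < nentries; i++)
 *			handle_bw_upcall(&bu[i]);
 *	}
 */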
 2101 
 2102 /*
 2103  * Compute the timeout hash value for the bw_meter entries
 2104  */
 2105 #define BW_METER_TIMEHASH(bw_meter, hash)                               \
 2106     do {                                                                \
 2107         struct timeval next_timeval = (bw_meter)->bm_start_time;        \
 2108                                                                         \
 2109         BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
 2110         (hash) = next_timeval.tv_sec;                                   \
 2111         if (next_timeval.tv_usec)                                       \
 2112             (hash)++; /* XXX: make sure we don't timeout early */       \
 2113         (hash) %= BW_METER_BUCKETS;                                     \
 2114     } while (0)
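
/*
 * Worked example with illustrative numbers: for bm_start_time 1000.300000
 * and a threshold interval of 3.500000 seconds, next_timeval is
 * 1003.800000; the nonzero tv_usec rounds the hash up to 1004 before the
 * reduction modulo BW_METER_BUCKETS, so the entry lands in the bucket for
 * second 1004 and cannot be expired before its full interval has elapsed.
 */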
 2115 
 2116 /*
 2117  * Schedule a timer to periodically process a bw_meter entry of type "<="
 2118  * by linking the entry into the proper hash bucket.
 2119  */
 2120 static void
 2121 schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
 2122 {
 2123     int time_hash;
 2124 
 2125     MFC_LOCK_ASSERT();
 2126 
 2127     if (!(x->bm_flags & BW_METER_LEQ))
 2128         return;         /* XXX: we schedule timers only for "<=" entries */
 2129 
 2130     /*
 2131      * Reset the bw_meter entry
 2132      */
 2133     x->bm_start_time = *nowp;
 2134     x->bm_measured.b_packets = 0;
 2135     x->bm_measured.b_bytes = 0;
 2136     x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 2137 
 2138     /*
 2139      * Compute the timeout hash value and insert the entry
 2140      */
 2141     BW_METER_TIMEHASH(x, time_hash);
 2142     x->bm_time_next = V_bw_meter_timers[time_hash];
 2143     V_bw_meter_timers[time_hash] = x;
 2144     x->bm_time_hash = time_hash;
 2145 }
 2146 
 2147 /*
 2148  * Unschedule the periodic timer that processes a bw_meter entry of type "<="
 2149  * by removing the entry from the proper hash bucket.
 2150  */
 2151 static void
 2152 unschedule_bw_meter(struct bw_meter *x)
 2153 {
 2154     int time_hash;
 2155     struct bw_meter *prev, *tmp;
 2156 
 2157     MFC_LOCK_ASSERT();
 2158 
 2159     if (!(x->bm_flags & BW_METER_LEQ))
 2160         return;         /* XXX: we schedule timers only for "<=" entries */
 2161 
 2162     /*
 2163      * Compute the timeout hash value and delete the entry
 2164      */
 2165     time_hash = x->bm_time_hash;
 2166     if (time_hash >= BW_METER_BUCKETS)
 2167         return;         /* Entry was not scheduled */
 2168 
 2169     for (prev = NULL, tmp = V_bw_meter_timers[time_hash];
 2170              tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
 2171         if (tmp == x)
 2172             break;
 2173 
 2174     if (tmp == NULL)
 2175         panic("unschedule_bw_meter: bw_meter entry not found");
 2176 
 2177     if (prev != NULL)
 2178         prev->bm_time_next = x->bm_time_next;
 2179     else
 2180         V_bw_meter_timers[time_hash] = x->bm_time_next;
 2181 
 2182     x->bm_time_next = NULL;
 2183     x->bm_time_hash = BW_METER_BUCKETS;
 2184 }
 2185 
 2186 
 2187 /*
 2188  * Process all "<=" type bw_meter entries that are due now, and for
 2189  * each entry prepare an upcall if necessary. Each processed entry is
 2190  * rescheduled for the next round of (periodic) processing.
 2191  *
 2192  * This is run periodically (once per second normally). On each round,
 2193  * all the potentially matching entries are in the hash slot that we are
 2194  * looking at.
 2195  */
 2196 static void
 2197 bw_meter_process()
 2198 {
 2199     uint32_t loops;
 2200     int i;
 2201     struct timeval now, process_endtime;
 2202 
 2203     microtime(&now);
 2204     if (V_last_tv_sec == now.tv_sec)
 2205         return;         /* nothing to do */
 2206 
 2207     loops = now.tv_sec - V_last_tv_sec;
 2208     V_last_tv_sec = now.tv_sec;
 2209     if (loops > BW_METER_BUCKETS)
 2210         loops = BW_METER_BUCKETS;
 2211 
 2212     MFC_LOCK();
 2213     /*
 2214      * Process all bins of bw_meter entries from the one after the last
 2215      * processed to the current one. On entry, i points to the last bucket
 2216      * visited, so we need to increment i at the beginning of the loop.
 2217      */
 2218     for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
 2219         struct bw_meter *x, *tmp_list;
 2220 
 2221         if (++i >= BW_METER_BUCKETS)
 2222             i = 0;
 2223 
 2224         /* Disconnect the list of bw_meter entries from the bin */
 2225         tmp_list = V_bw_meter_timers[i];
 2226         V_bw_meter_timers[i] = NULL;
 2227 
 2228         /* Process the list of bw_meter entries */
 2229         while (tmp_list != NULL) {
 2230             x = tmp_list;
 2231             tmp_list = tmp_list->bm_time_next;
 2232 
 2233             /* Test if the time interval is over */
 2234             process_endtime = x->bm_start_time;
 2235             BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
 2236             if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
 2237                 /* Not yet: reschedule, but don't reset */
 2238                 int time_hash;
 2239 
 2240                 BW_METER_TIMEHASH(x, time_hash);
 2241                 if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
 2242                     /*
 2243                      * XXX: somehow the bin processing is a bit ahead of time.
 2244                      * Put the entry in the next bin.
 2245                      */
 2246                     if (++time_hash >= BW_METER_BUCKETS)
 2247                         time_hash = 0;
 2248                 }
 2249                 x->bm_time_next = V_bw_meter_timers[time_hash];
 2250                 V_bw_meter_timers[time_hash] = x;
 2251                 x->bm_time_hash = time_hash;
 2252 
 2253                 continue;
 2254             }
 2255 
 2256             /*
 2257              * Test if we should deliver an upcall
 2258              */
 2259             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 2260                  (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
 2261                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 2262                  (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
 2263                 /* Prepare an upcall for delivery */
 2264                 bw_meter_prepare_upcall(x, &now);
 2265             }
 2266 
 2267             /*
 2268              * Reschedule for next processing
 2269              */
 2270             schedule_bw_meter(x, &now);
 2271         }
 2272     }
 2273 
 2274     /* Send all upcalls that are pending delivery */
 2275     bw_upcalls_send();
 2276 
 2277     MFC_UNLOCK();
 2278 }
 2279 
 2280 /*
 2281  * A periodic function for sending all upcalls that are pending delivery
 2282  */
 2283 static void
 2284 expire_bw_upcalls_send(void *arg)
 2285 {
 2286     CURVNET_SET((struct vnet *) arg);
 2287 
 2288     MFC_LOCK();
 2289     bw_upcalls_send();
 2290     MFC_UNLOCK();
 2291 
 2292     callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
 2293         curvnet);
 2294     CURVNET_RESTORE();
 2295 }
 2296 
 2297 /*
 2298  * A periodic function that scans the multicast forwarding table and
 2299  * processes all "<=" bw_meter entries.
 2300  */
 2301 static void
 2302 expire_bw_meter_process(void *arg)
 2303 {
 2304     CURVNET_SET((struct vnet *) arg);
 2305 
 2306     if (V_mrt_api_config & MRT_MFC_BW_UPCALL)
 2307         bw_meter_process();
 2308 
 2309     callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
 2310         curvnet);
 2311     CURVNET_RESTORE();
 2312 }
 2313 
 2314 /*
 2315  * End of bandwidth monitoring code
 2316  */
 2317 
 2318 /*
 2319  * Send the packet up to the user-level daemon, or do the kernel PIM
 2320  * Register encapsulation and send it to the RP.
 2321  */
 2322 static int
 2323 pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
 2324     struct mfc *rt)
 2325 {
 2326     struct mbuf *mb_copy, *mm;
 2327 
 2328     /*
 2329      * Do not send IGMPMSG_WHOLEPKT notifications to userland if the
 2330      * rendezvous point was unspecified and we were told not to.
 2331      */
 2332     if (pim_squelch_wholepkt != 0 && (V_mrt_api_config & MRT_MFC_RP) &&
 2333         in_nullhost(rt->mfc_rp))
 2334         return 0;
 2335 
 2336     mb_copy = pim_register_prepare(ip, m);
 2337     if (mb_copy == NULL)
 2338         return ENOBUFS;
 2339 
 2340     /*
 2341      * Send all the fragments. Note that the mbuf for each fragment
 2342      * is freed by the sending machinery.
 2343      */
 2344     for (mm = mb_copy; mm; mm = mb_copy) {
 2345         mb_copy = mm->m_nextpkt;
 2346         mm->m_nextpkt = 0;
 2347         mm = m_pullup(mm, sizeof(struct ip));
 2348         if (mm != NULL) {
 2349             ip = mtod(mm, struct ip *);
 2350             if ((V_mrt_api_config & MRT_MFC_RP) && !in_nullhost(rt->mfc_rp)) {
 2351                 pim_register_send_rp(ip, vifp, mm, rt);
 2352             } else {
 2353                 pim_register_send_upcall(ip, vifp, mm, rt);
 2354             }
 2355         }
 2356     }
 2357 
 2358     return 0;
 2359 }
 2360 
 2361 /*
 2362  * Return a copy of the data packet that is ready for PIM Register
 2363  * encapsulation.
 2364  * XXX: Note that in the returned copy the IP header is a valid one.
 2365  */
 2366 static struct mbuf *
 2367 pim_register_prepare(struct ip *ip, struct mbuf *m)
 2368 {
 2369     struct mbuf *mb_copy = NULL;
 2370     int mtu;
 2371 
 2372     /* Take care of delayed checksums */
 2373     if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
 2374         in_delayed_cksum(m);
 2375         m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
 2376     }
 2377 
 2378     /*
 2379      * Copy the old packet & pullup its IP header into the
 2380      * new mbuf so we can modify it.
 2381      */
 2382     mb_copy = m_copypacket(m, M_DONTWAIT);
 2383     if (mb_copy == NULL)
 2384         return NULL;
 2385     mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
 2386     if (mb_copy == NULL)
 2387         return NULL;
 2388 
 2389     /* take care of the TTL */
 2390     ip = mtod(mb_copy, struct ip *);
 2391     --ip->ip_ttl;
 2392 
 2393     /* Compute the MTU after the PIM Register encapsulation */
 2394     mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
 2395 
 2396     if (ip->ip_len <= mtu) {
 2397         /* Turn the IP header into a valid one */
 2398         ip->ip_len = htons(ip->ip_len);
 2399         ip->ip_off = htons(ip->ip_off);
 2400         ip->ip_sum = 0;
 2401         ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
 2402     } else {
 2403         /* Fragment the packet */
 2404         if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) {
 2405             m_freem(mb_copy);
 2406             return NULL;
 2407         }
 2408     }
 2409     return mb_copy;
 2410 }
 2411 
 2412 /*
 2413  * Send an upcall with the data packet to the user-level process.
 2414  */
 2415 static int
 2416 pim_register_send_upcall(struct ip *ip, struct vif *vifp,
 2417     struct mbuf *mb_copy, struct mfc *rt)
 2418 {
 2419     struct mbuf *mb_first;
 2420     int len = ntohs(ip->ip_len);
 2421     struct igmpmsg *im;
 2422     struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 2423 
 2424     VIF_LOCK_ASSERT();
 2425 
 2426     /*
 2427      * Add a new mbuf with an upcall header
 2428      */
 2429     MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
 2430     if (mb_first == NULL) {
 2431         m_freem(mb_copy);
 2432         return ENOBUFS;
 2433     }
 2434     mb_first->m_data += max_linkhdr;
 2435     mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
 2436     mb_first->m_len = sizeof(struct igmpmsg);
 2437     mb_first->m_next = mb_copy;
 2438 
 2439     /* Send message to routing daemon */
 2440     im = mtod(mb_first, struct igmpmsg *);
 2441     im->im_msgtype      = IGMPMSG_WHOLEPKT;
 2442     im->im_mbz          = 0;
 2443     im->im_vif          = vifp - V_viftable;
 2444     im->im_src          = ip->ip_src;
 2445     im->im_dst          = ip->ip_dst;
 2446 
 2447     k_igmpsrc.sin_addr  = ip->ip_src;
 2448 
 2449     MRTSTAT_INC(mrts_upcalls);
 2450 
 2451     if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) {
 2452         CTR1(KTR_IPMF, "%s: socket queue full", __func__);
 2453         MRTSTAT_INC(mrts_upq_sockfull);
 2454         return ENOBUFS;
 2455     }
 2456 
 2457     /* Keep statistics */
 2458     PIMSTAT_INC(pims_snd_registers_msgs);
 2459     PIMSTAT_ADD(pims_snd_registers_bytes, len);
 2460 
 2461     return 0;
 2462 }
 2463 
 2464 /*
 2465  * Encapsulate the data packet in PIM Register message and send it to the RP.
 2466  */
 2467 static int
 2468 pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy,
 2469     struct mfc *rt)
 2470 {
 2471     struct mbuf *mb_first;
 2472     struct ip *ip_outer;
 2473     struct pim_encap_pimhdr *pimhdr;
 2474     int len = ntohs(ip->ip_len);
 2475     vifi_t vifi = rt->mfc_parent;
 2476 
 2477     VIF_LOCK_ASSERT();
 2478 
 2479     if ((vifi >= V_numvifs) || in_nullhost(V_viftable[vifi].v_lcl_addr)) {
 2480         m_freem(mb_copy);
 2481         return EADDRNOTAVAIL;           /* The iif vif is invalid */
 2482     }
 2483 
 2484     /*
 2485      * Add a new mbuf with the encapsulating header
 2486      */
 2487     MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
 2488     if (mb_first == NULL) {
 2489         m_freem(mb_copy);
 2490         return ENOBUFS;
 2491     }
 2492     mb_first->m_data += max_linkhdr;
 2493     mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
 2494     mb_first->m_next = mb_copy;
 2495 
 2496     mb_first->m_pkthdr.len = len + mb_first->m_len;
 2497 
 2498     /*
 2499      * Fill in the encapsulating IP and PIM header
 2500      */
 2501     ip_outer = mtod(mb_first, struct ip *);
 2502     *ip_outer = pim_encap_iphdr;
 2503     ip_outer->ip_id = ip_newid();
 2504     ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
 2505     ip_outer->ip_src = V_viftable[vifi].v_lcl_addr;
 2506     ip_outer->ip_dst = rt->mfc_rp;
 2507     /*
 2508      * Copy the inner header TOS to the outer header, and take care of the
 2509      * IP_DF bit.
 2510      */
 2511     ip_outer->ip_tos = ip->ip_tos;
 2512     if (ntohs(ip->ip_off) & IP_DF)
 2513         ip_outer->ip_off |= IP_DF;
 2514     pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
 2515                                          + sizeof(pim_encap_iphdr));
 2516     *pimhdr = pim_encap_pimhdr;
 2517     /* If the iif crosses a border, set the Border-bit */
 2518     if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & V_mrt_api_config)
 2519         pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
 2520 
 2521     mb_first->m_data += sizeof(pim_encap_iphdr);
 2522     pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
 2523     mb_first->m_data -= sizeof(pim_encap_iphdr);
 2524 
 2525     send_packet(vifp, mb_first);
 2526 
 2527     /* Keep statistics */
 2528     PIMSTAT_INC(pims_snd_registers_msgs);
 2529     PIMSTAT_ADD(pims_snd_registers_bytes, len);
 2530 
 2531     return 0;
 2532 }
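
/*
 * Size bookkeeping for the encapsulation above, with illustrative numbers:
 * the Register prepends sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr)
 * = 20 + 8 = 28 bytes, so a 1000-byte multicast datagram leaves for the RP
 * as a 1028-byte unicast PIM Register (ip_dst = rt->mfc_rp, ip_src = the
 * local address of the incoming vif), and the PIM checksum covers only the
 * 8-byte Register header, not the encapsulated data.
 */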
 2533 
 2534 /*
 2535  * pim_encapcheck() is called by the encap4_input() path at runtime to
 2536  * determine if a packet is for PIM, allowing PIM to be dynamically loaded
 2537  * into the kernel.
 2538  */
 2539 static int
 2540 pim_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
 2541 {
 2542 
 2543 #ifdef DIAGNOSTIC
 2544     KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
 2545 #endif
 2546     if (proto != IPPROTO_PIM)
 2547         return 0;       /* not for us; reject the datagram. */
 2548 
 2549     return 64;          /* claim the datagram. */
 2550 }
 2551 
 2552 /*
 2553  * PIM-SMv2 and PIM-DM message processing.
 2554  * Receives and verifies the PIM control messages, and passes them
 2555  * up to the listening socket, using rip_input().
 2556  * The only message with special processing is the PIM_REGISTER message
 2557  * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 2558  * is passed to if_simloop().
 2559  */
 2560 void
 2561 pim_input(struct mbuf *m, int off)
 2562 {
 2563     struct ip *ip = mtod(m, struct ip *);
 2564     struct pim *pim;
 2565     int minlen;
 2566     int datalen = ip->ip_len;
 2567     int ip_tos;
 2568     int iphlen = off;
 2569 
 2570     /* Keep statistics */
 2571     PIMSTAT_INC(pims_rcv_total_msgs);
 2572     PIMSTAT_ADD(pims_rcv_total_bytes, datalen);
 2573 
 2574     /*
 2575      * Validate lengths
 2576      */
 2577     if (datalen < PIM_MINLEN) {
 2578         PIMSTAT_INC(pims_rcv_tooshort);
 2579         CTR3(KTR_IPMF, "%s: short packet (%d) from %s",
 2580             __func__, datalen, inet_ntoa(ip->ip_src));
 2581         m_freem(m);
 2582         return;
 2583     }
 2584 
 2585     /*
 2586      * If the packet is at least as big as a REGISTER, go ahead
 2587      * and grab the PIM REGISTER header size, to avoid another
 2588      * possible m_pullup() later.
 2589      *
 2590      * PIM_MINLEN       == pimhdr + u_int32_t == 4 + 4 = 8
 2591      * PIM_REG_MINLEN   == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
 2592      */
 2593     minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
 2594     /*
 2595      * Get the IP and PIM headers in contiguous memory, and
 2596      * possibly the PIM REGISTER header.
 2597      */
 2598     if ((m->m_flags & M_EXT || m->m_len < minlen) &&
 2599         (m = m_pullup(m, minlen)) == 0) {
 2600         CTR1(KTR_IPMF, "%s: m_pullup() failed", __func__);
 2601         return;
 2602     }
 2603 
 2604     /* m_pullup() may have given us a new mbuf so reset ip. */
 2605     ip = mtod(m, struct ip *);
 2606     ip_tos = ip->ip_tos;
 2607 
 2608     /* adjust mbuf to point to the PIM header */
 2609     m->m_data += iphlen;
 2610     m->m_len  -= iphlen;
 2611     pim = mtod(m, struct pim *);
 2612 
 2613     /*
 2614      * Validate checksum. If PIM REGISTER, exclude the data packet.
 2615      *
 2616      * XXX: some older PIMv2 implementations don't make this distinction,
 2617      * so for compatibility reason perform the checksum over part of the
 2618      * message, and if error, then over the whole message.
 2619      */
 2620     if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
 2621         /* do nothing, checksum okay */
 2622     } else if (in_cksum(m, datalen)) {
 2623         PIMSTAT_INC(pims_rcv_badsum);
 2624         CTR1(KTR_IPMF, "%s: invalid checksum", __func__);
 2625         m_freem(m);
 2626         return;
 2627     }
 2628 
 2629     /* PIM version check */
 2630     if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
 2631         PIMSTAT_INC(pims_rcv_badversion);
 2632         CTR3(KTR_IPMF, "%s: bad version %d expect %d", __func__,
 2633             (int)PIM_VT_V(pim->pim_vt), PIM_VERSION);
 2634         m_freem(m);
 2635         return;
 2636     }
 2637 
 2638     /* restore mbuf back to the outer IP */
 2639     m->m_data -= iphlen;
 2640     m->m_len  += iphlen;
 2641 
 2642     if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
 2643         /*
 2644          * Since this is a REGISTER, we'll make a copy of the register
 2645          * headers ip + pim + u_int32 + encap_ip, to be passed up to the
 2646          * routing daemon.
 2647          */
 2648         struct sockaddr_in dst = { sizeof(dst), AF_INET };
 2649         struct mbuf *mcp;
 2650         struct ip *encap_ip;
 2651         u_int32_t *reghdr;
 2652         struct ifnet *vifp;
 2653 
 2654         VIF_LOCK();
 2655         if ((V_reg_vif_num >= V_numvifs) || (V_reg_vif_num == VIFI_INVALID)) {
 2656             VIF_UNLOCK();
 2657             CTR2(KTR_IPMF, "%s: register vif not set: %d", __func__,
 2658                 (int)V_reg_vif_num);
 2659             m_freem(m);
 2660             return;
 2661         }
 2662         /* XXX need refcnt? */
 2663         vifp = V_viftable[V_reg_vif_num].v_ifp;
 2664         VIF_UNLOCK();
 2665 
 2666         /*
 2667          * Validate length
 2668          */
 2669         if (datalen < PIM_REG_MINLEN) {
 2670             PIMSTAT_INC(pims_rcv_tooshort);
 2671             PIMSTAT_INC(pims_rcv_badregisters);
 2672             CTR1(KTR_IPMF, "%s: register packet size too small", __func__);
 2673             m_freem(m);
 2674             return;
 2675         }
 2676 
 2677         reghdr = (u_int32_t *)(pim + 1);
 2678         encap_ip = (struct ip *)(reghdr + 1);
 2679 
 2680         CTR3(KTR_IPMF, "%s: register: encap ip src %s len %d",
 2681             __func__, inet_ntoa(encap_ip->ip_src), ntohs(encap_ip->ip_len));
 2682 
 2683         /* verify the version number of the inner packet */
 2684         if (encap_ip->ip_v != IPVERSION) {
 2685             PIMSTAT_INC(pims_rcv_badregisters);
 2686             CTR1(KTR_IPMF, "%s: bad encap ip version", __func__);
 2687             m_freem(m);
 2688             return;
 2689         }
 2690 
 2691         /* verify the inner packet is destined to a mcast group */
 2692         if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
 2693             PIMSTAT_INC(pims_rcv_badregisters);
 2694             CTR2(KTR_IPMF, "%s: bad encap ip dest %s", __func__,
 2695                 inet_ntoa(encap_ip->ip_dst));
 2696             m_freem(m);
 2697             return;
 2698         }
 2699 
 2700         /* If a NULL_REGISTER, pass it to the daemon */
 2701         if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
 2702             goto pim_input_to_daemon;
 2703 
 2704         /*
 2705          * Copy the TOS from the outer IP header to the inner IP header.
 2706          */
 2707         if (encap_ip->ip_tos != ip_tos) {
 2708             /* Outer TOS -> inner TOS */
 2709             encap_ip->ip_tos = ip_tos;
 2710             /* Recompute the inner header checksum. Sigh... */
 2711 
 2712             /* adjust mbuf to point to the inner IP header */
 2713             m->m_data += (iphlen + PIM_MINLEN);
 2714             m->m_len  -= (iphlen + PIM_MINLEN);
 2715 
 2716             encap_ip->ip_sum = 0;
 2717             encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
 2718 
 2719             /* restore mbuf to point back to the outer IP header */
 2720             m->m_data -= (iphlen + PIM_MINLEN);
 2721             m->m_len  += (iphlen + PIM_MINLEN);
 2722         }
 2723 
 2724         /*
 2725          * Decapsulate the inner IP packet and loopback to forward it
 2726          * as a normal multicast packet. Also, make a copy of the
 2727          *     outer_iphdr + pimhdr + reghdr + encap_iphdr
 2728          * to pass to the daemon later, so it can take the appropriate
 2729          * actions (e.g., send back PIM_REGISTER_STOP).
 2730          * XXX: here m->m_data points to the outer IP header.
 2731          */
 2732         mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
 2733         if (mcp == NULL) {
 2734             CTR1(KTR_IPMF, "%s: m_copy() failed", __func__);
 2735             m_freem(m);
 2736             return;
 2737         }
 2738 
 2739         /* Keep statistics */
 2740         /* XXX: registers_bytes include only the encap. mcast pkt */
 2741         PIMSTAT_INC(pims_rcv_registers_msgs);
 2742         PIMSTAT_ADD(pims_rcv_registers_bytes, ntohs(encap_ip->ip_len));
 2743 
 2744         /*
 2745          * forward the inner ip packet; point m_data at the inner ip.
 2746          */
 2747         m_adj(m, iphlen + PIM_MINLEN);
 2748 
 2749         CTR4(KTR_IPMF,
 2750             "%s: forward decap'd REGISTER: src %lx dst %lx vif %d",
 2751             __func__,
 2752             (u_long)ntohl(encap_ip->ip_src.s_addr),
 2753             (u_long)ntohl(encap_ip->ip_dst.s_addr),
 2754             (int)V_reg_vif_num);
 2755 
 2756         /* NB: vifp was collected above; can it change on us? */
 2757         if_simloop(vifp, m, dst.sin_family, 0);
 2758 
 2759         /* prepare the register head to send to the mrouting daemon */
 2760         m = mcp;
 2761     }
 2762 
 2763 pim_input_to_daemon:
 2764     /*
 2765      * Pass the PIM message up to the daemon; if it is a Register message,
 2766      * pass the 'head' only up to the daemon. This includes the
 2767      * outer IP header, PIM header, PIM-Register header and the
 2768      * inner IP header.
 2769      * XXX: the outer IP header pkt size of a Register is not adjusted to
 2770      * reflect the fact that the inner multicast data is truncated.
 2771      */
 2772     rip_input(m, iphlen);
 2773 
 2774     return;
 2775 }
 2776 
 2777 static int
 2778 sysctl_mfctable(SYSCTL_HANDLER_ARGS)
 2779 {
 2780         struct mfc      *rt;
 2781         int              error, i;
 2782 
 2783         if (req->newptr)
 2784                 return (EPERM);
 2785         if (V_mfchashtbl == NULL)       /* XXX unlocked */
 2786                 return (0);
 2787         error = sysctl_wire_old_buffer(req, 0);
 2788         if (error)
 2789                 return (error);
 2790 
 2791         MFC_LOCK();
 2792         for (i = 0; i < mfchashsize; i++) {
 2793                 LIST_FOREACH(rt, &V_mfchashtbl[i], mfc_hash) {
 2794                         error = SYSCTL_OUT(req, rt, sizeof(struct mfc));
 2795                         if (error)
 2796                                 goto out_locked;
 2797                 }
 2798         }
 2799 out_locked:
 2800         MFC_UNLOCK();
 2801         return (error);
 2802 }
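
/*
 * Userland can read this node as an opaque array of struct mfc records.
 * A hedged sketch of such a reader, assuming the struct mfc layout is
 * visible to the program (parts of ip_mroute.h are kernel-only, so a
 * consumer may need to provide a matching definition):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip_mroute.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void
 *	dump_mfctable(void)
 *	{
 *		size_t len = 0;
 *		struct mfc *table;
 *
 *		if (sysctlbyname("net.inet.ip.mfctable", NULL, &len, NULL, 0) != 0)
 *			return;
 *		if ((table = malloc(len)) == NULL)
 *			return;
 *		if (sysctlbyname("net.inet.ip.mfctable", table, &len, NULL, 0) == 0)
 *			printf("%zu MFC entries\n", len / sizeof(struct mfc));
 *		free(table);
 *	}
 */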
 2803 
 2804 static SYSCTL_NODE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD,
 2805     sysctl_mfctable, "IPv4 Multicast Forwarding Table "
 2806     "(struct *mfc[mfchashsize], netinet/ip_mroute.h)");
 2807 
 2808 static void
 2809 vnet_mroute_init(const void *unused __unused)
 2810 {
 2811 
 2812         MALLOC(V_nexpire, u_char *, mfchashsize, M_MRTABLE, M_WAITOK|M_ZERO);
 2813         bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
 2814         callout_init(&V_expire_upcalls_ch, CALLOUT_MPSAFE);
 2815         callout_init(&V_bw_upcalls_ch, CALLOUT_MPSAFE);
 2816         callout_init(&V_bw_meter_ch, CALLOUT_MPSAFE);
 2817 }
 2818 
 2819 VNET_SYSINIT(vnet_mroute_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mroute_init,
 2820         NULL);
 2821 
 2822 static void
 2823 vnet_mroute_uninit(const void *unused __unused)
 2824 {
 2825 
 2826         FREE(V_nexpire, M_MRTABLE);
 2827         V_nexpire = NULL;
 2828 }
 2829 
 2830 VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, 
 2831         vnet_mroute_uninit, NULL);
 2832 
 2833 static int
 2834 ip_mroute_modevent(module_t mod, int type, void *unused)
 2835 {
 2836 
 2837     switch (type) {
 2838     case MOD_LOAD:
 2839         MROUTER_LOCK_INIT();
 2840 
 2841         if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 
 2842             if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
 2843         if (if_detach_event_tag == NULL) {
 2844                 printf("ip_mroute: unable to register "
 2845                     "ifnet_departure_event handler\n");
 2846                 MROUTER_LOCK_DESTROY();
 2847                 return (EINVAL);
 2848         }
 2849 
 2850         MFC_LOCK_INIT();
 2851         VIF_LOCK_INIT();
 2852 
 2853         mfchashsize = MFCHASHSIZE;
 2854         if (TUNABLE_ULONG_FETCH("net.inet.ip.mfchashsize", &mfchashsize) &&
 2855             !powerof2(mfchashsize)) {
 2856                 printf("WARNING: %s not a power of 2; using default\n",
 2857                     "net.inet.ip.mfchashsize");
 2858                 mfchashsize = MFCHASHSIZE;
 2859         }
 2860 
 2861         pim_squelch_wholepkt = 0;
 2862         TUNABLE_ULONG_FETCH("net.inet.pim.squelch_wholepkt",
 2863             &pim_squelch_wholepkt);
 2864 
 2865         pim_encap_cookie = encap_attach_func(AF_INET, IPPROTO_PIM,
 2866             pim_encapcheck, &in_pim_protosw, NULL);
 2867         if (pim_encap_cookie == NULL) {
 2868                 printf("ip_mroute: unable to attach pim encap\n");
 2869                 VIF_LOCK_DESTROY();
 2870                 MFC_LOCK_DESTROY();
 2871                 MROUTER_LOCK_DESTROY();
 2872                 return (EINVAL);
 2873         }
 2874 
 2875         ip_mcast_src = X_ip_mcast_src;
 2876         ip_mforward = X_ip_mforward;
 2877         ip_mrouter_done = X_ip_mrouter_done;
 2878         ip_mrouter_get = X_ip_mrouter_get;
 2879         ip_mrouter_set = X_ip_mrouter_set;
 2880 
 2881         ip_rsvp_force_done = X_ip_rsvp_force_done;
 2882         ip_rsvp_vif = X_ip_rsvp_vif;
 2883 
 2884         legal_vif_num = X_legal_vif_num;
 2885         mrt_ioctl = X_mrt_ioctl;
 2886         rsvp_input_p = X_rsvp_input;
 2887         break;
 2888 
 2889     case MOD_UNLOAD:
 2890         /*
 2891          * Typically module unload happens after the user-level
 2892          * process has shut down the kernel services (the check
 2893          * below ensures someone can't just yank the module out
 2894          * from under a running process).  But if the module is
 2895          * just loaded and then unloaded w/o starting up a user
 2896          * process we still need to cleanup.
 2897          */
 2898         MROUTER_LOCK();
 2899         if (ip_mrouter_cnt != 0) {
 2900             MROUTER_UNLOCK();
 2901             return (EINVAL);
 2902         }
 2903         ip_mrouter_unloading = 1;
 2904         MROUTER_UNLOCK();
 2905 
 2906         EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
 2907 
 2908         if (pim_encap_cookie) {
 2909             encap_detach(pim_encap_cookie);
 2910             pim_encap_cookie = NULL;
 2911         }
 2912 
 2913         ip_mcast_src = NULL;
 2914         ip_mforward = NULL;
 2915         ip_mrouter_done = NULL;
 2916         ip_mrouter_get = NULL;
 2917         ip_mrouter_set = NULL;
 2918 
 2919         ip_rsvp_force_done = NULL;
 2920         ip_rsvp_vif = NULL;
 2921 
 2922         legal_vif_num = NULL;
 2923         mrt_ioctl = NULL;
 2924         rsvp_input_p = NULL;
 2925 
 2926         VIF_LOCK_DESTROY();
 2927         MFC_LOCK_DESTROY();
 2928         MROUTER_LOCK_DESTROY();
 2929         break;
 2930 
 2931     default:
 2932         return EOPNOTSUPP;
 2933     }
 2934     return 0;
 2935 }
 2936 
 2937 static moduledata_t ip_mroutemod = {
 2938     "ip_mroute",
 2939     ip_mroute_modevent,
 2940     0
 2941 };
 2942 
 2943 DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
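
/*
 * The module can be loaded at boot time or with "kldload ip_mroute"; the
 * two loader tunables consumed in MOD_LOAD above may be preset from
 * loader.conf, for example (values shown are illustrative only):
 *
 *	net.inet.ip.mfchashsize="256"		# must be a power of 2
 *	net.inet.pim.squelch_wholepkt="1"
 */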
