
FreeBSD/Linux Kernel Cross Reference
sys/netinet/ip_mroute.c


    1 /*-
    2  * Copyright (c) 1989 Stephen Deering
    3  * Copyright (c) 1992, 1993
    4  *      The Regents of the University of California.  All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * Stephen Deering of Stanford University.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 4. Neither the name of the University nor the names of its contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   31  * SUCH DAMAGE.
   32  *
   33  *      @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
   34  */
   35 
   36 /*
   37  * IP multicast forwarding procedures
   38  *
   39  * Written by David Waitzman, BBN Labs, August 1988.
   40  * Modified by Steve Deering, Stanford, February 1989.
   41  * Modified by Mark J. Steiglitz, Stanford, May, 1991
   42  * Modified by Van Jacobson, LBL, January 1993
   43  * Modified by Ajit Thyagarajan, PARC, August 1993
   44  * Modified by Bill Fenner, PARC, April 1995
   45  * Modified by Ahmed Helmy, SGI, June 1996
   46  * Modified by George Edmond Eddy (Rusty), ISI, February 1998
   47  * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
   48  * Modified by Hitoshi Asaeda, WIDE, August 2000
   49  * Modified by Pavlin Radoslavov, ICSI, October 2002
   50  *
   51  * MROUTING Revision: 3.5
   52  * and PIM-SMv2 and PIM-DM support, advanced API support,
   53  * bandwidth metering and signaling
   54  */
   55 
   56 /*
   57  * TODO: Prefix functions with ipmf_.
   58  * TODO: Maintain a refcount on if_allmulti() in ifnet or in the protocol
   59  * domain attachment (if_afdata) so we can track consumers of that service.
   60  * TODO: Deprecate routing socket path for SIOCGETSGCNT and SIOCGETVIFCNT,
   61  * move it to socket options.
   62  * TODO: Cleanup LSRR removal further.
   63  * TODO: Push RSVP stubs into raw_ip.c.
   64  * TODO: Use bitstring.h for vif set.
   65  * TODO: Fix mrt6_ioctl dangling ref when dynamically loaded.
   66  * TODO: Sync ip6_mroute.c with this file.
   67  */
   68 
   69 #include <sys/cdefs.h>
   70 __FBSDID("$FreeBSD: releng/9.0/sys/netinet/ip_mroute.c 215701 2010-11-22 19:32:54Z dim $");
   71 
   72 #include "opt_inet.h"
   73 #include "opt_mrouting.h"
   74 
   75 #define _PIM_VT 1
   76 
   77 #include <sys/param.h>
   78 #include <sys/kernel.h>
   79 #include <sys/stddef.h>
   80 #include <sys/lock.h>
   81 #include <sys/ktr.h>
   82 #include <sys/malloc.h>
   83 #include <sys/mbuf.h>
   84 #include <sys/module.h>
   85 #include <sys/priv.h>
   86 #include <sys/protosw.h>
   87 #include <sys/signalvar.h>
   88 #include <sys/socket.h>
   89 #include <sys/socketvar.h>
   90 #include <sys/sockio.h>
   91 #include <sys/sx.h>
   92 #include <sys/sysctl.h>
   93 #include <sys/syslog.h>
   94 #include <sys/systm.h>
   95 #include <sys/time.h>
   96 
   97 #include <net/if.h>
   98 #include <net/netisr.h>
   99 #include <net/route.h>
  100 #include <net/vnet.h>
  101 
  102 #include <netinet/in.h>
  103 #include <netinet/igmp.h>
  104 #include <netinet/in_systm.h>
  105 #include <netinet/in_var.h>
  106 #include <netinet/ip.h>
  107 #include <netinet/ip_encap.h>
  108 #include <netinet/ip_mroute.h>
  109 #include <netinet/ip_var.h>
  110 #include <netinet/ip_options.h>
  111 #include <netinet/pim.h>
  112 #include <netinet/pim_var.h>
  113 #include <netinet/udp.h>
  114 
  115 #include <machine/in_cksum.h>
  116 
  117 #ifndef KTR_IPMF
  118 #define KTR_IPMF KTR_INET
  119 #endif
  120 
  121 #define         VIFI_INVALID    ((vifi_t) -1)
  122 #define         M_HASCL(m)      ((m)->m_flags & M_EXT)
  123 
  124 static VNET_DEFINE(uint32_t, last_tv_sec); /* last time we processed this */
  125 #define V_last_tv_sec   VNET(last_tv_sec)
  126 
  127 static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast forwarding cache");
  128 
  129 /*
  130  * Locking.  We use two locks: one for the virtual interface table and
  131  * one for the forwarding table.  These locks may be nested in which case
  132  * the VIF lock must always be taken first.  Note that each lock is used
  133  * to cover not only the specific data structure but also related data
  134  * structures.
  135  */
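
/*
 * For illustration, the nesting order described above, as used by
 * add_mfc() and X_ip_mforward() later in this file: the VIF lock is
 * taken first and released last.
 *
 *      VIF_LOCK();
 *      MFC_LOCK();
 *      ...  operate on the vif table and the forwarding cache ...
 *      MFC_UNLOCK();
 *      VIF_UNLOCK();
 *
 * Keeping this order everywhere is what makes the nesting safe.
 */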
  136 
  137 static struct mtx mrouter_mtx;
  138 #define MROUTER_LOCK()          mtx_lock(&mrouter_mtx)
  139 #define MROUTER_UNLOCK()        mtx_unlock(&mrouter_mtx)
  140 #define MROUTER_LOCK_ASSERT()   mtx_assert(&mrouter_mtx, MA_OWNED)
  141 #define MROUTER_LOCK_INIT()                                             \
  142         mtx_init(&mrouter_mtx, "IPv4 multicast forwarding", NULL, MTX_DEF)
  143 #define MROUTER_LOCK_DESTROY()  mtx_destroy(&mrouter_mtx)
  144 
  145 static int ip_mrouter_cnt;      /* # of vnets with active mrouters */
  146 static int ip_mrouter_unloading; /* Allow no more V_ip_mrouter sockets */
  147 
  148 static VNET_DEFINE(struct mrtstat, mrtstat);
  149 #define V_mrtstat               VNET(mrtstat)
  150 SYSCTL_VNET_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
  151     &VNET_NAME(mrtstat), mrtstat,
  152     "IPv4 Multicast Forwarding Statistics (struct mrtstat, "
  153     "netinet/ip_mroute.h)");
  154 
  155 static VNET_DEFINE(u_long, mfchash);
  156 #define V_mfchash               VNET(mfchash)
  157 #define MFCHASH(a, g)                                                   \
  158         ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \
  159           ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & V_mfchash)
  160 #define MFCHASHSIZE     256
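
/*
 * V_mfchash holds the mask handed back by hashinit_flags() in
 * ip_mrouter_init() below (mfchashsize - 1 for the power-of-two table
 * size), so MFCHASH() XOR-folds the source and group addresses and
 * masks the result down to a valid bucket index.  A minimal sketch,
 * assuming the MFC lock is held:
 *
 *      u_long hash = MFCHASH(ip->ip_src, ip->ip_dst);  // 0..mfchashsize-1
 *      struct mfc *rt = LIST_FIRST(&V_mfchashtbl[hash]);
 *
 * mfc_find() below walks the chosen chain in exactly this way.
 */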
  161 
  162 static u_long mfchashsize;                      /* Hash size */
  163 static VNET_DEFINE(u_char *, nexpire);          /* 0..mfchashsize-1 */
  164 #define V_nexpire               VNET(nexpire)
  165 static VNET_DEFINE(LIST_HEAD(mfchashhdr, mfc)*, mfchashtbl);
  166 #define V_mfchashtbl            VNET(mfchashtbl)
  167 
  168 static struct mtx mfc_mtx;
  169 #define MFC_LOCK()              mtx_lock(&mfc_mtx)
  170 #define MFC_UNLOCK()            mtx_unlock(&mfc_mtx)
  171 #define MFC_LOCK_ASSERT()       mtx_assert(&mfc_mtx, MA_OWNED)
  172 #define MFC_LOCK_INIT()                                                 \
  173         mtx_init(&mfc_mtx, "IPv4 multicast forwarding cache", NULL, MTX_DEF)
  174 #define MFC_LOCK_DESTROY()      mtx_destroy(&mfc_mtx)
  175 
  176 static VNET_DEFINE(vifi_t, numvifs);
  177 #define V_numvifs               VNET(numvifs)
  178 static VNET_DEFINE(struct vif, viftable[MAXVIFS]);
  179 #define V_viftable              VNET(viftable)
  180 SYSCTL_VNET_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD,
  181     &VNET_NAME(viftable), sizeof(V_viftable), "S,vif[MAXVIFS]",
  182     "IPv4 Multicast Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");
  183 
  184 static struct mtx vif_mtx;
  185 #define VIF_LOCK()              mtx_lock(&vif_mtx)
  186 #define VIF_UNLOCK()            mtx_unlock(&vif_mtx)
  187 #define VIF_LOCK_ASSERT()       mtx_assert(&vif_mtx, MA_OWNED)
  188 #define VIF_LOCK_INIT()                                                 \
  189         mtx_init(&vif_mtx, "IPv4 multicast interfaces", NULL, MTX_DEF)
  190 #define VIF_LOCK_DESTROY()      mtx_destroy(&vif_mtx)
  191 
  192 static eventhandler_tag if_detach_event_tag = NULL;
  193 
  194 static VNET_DEFINE(struct callout, expire_upcalls_ch);
  195 #define V_expire_upcalls_ch     VNET(expire_upcalls_ch)
  196 
  197 #define         EXPIRE_TIMEOUT  (hz / 4)        /* 4x / second          */
  198 #define         UPCALL_EXPIRE   6               /* number of timeouts   */
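
/*
 * Taken together these two constants bound how long an unserviced
 * upcall entry may linger: expire_upcalls() runs every EXPIRE_TIMEOUT
 * ticks (hz / 4, i.e. roughly every 250 ms) and an entry is created
 * with mfc_expire = UPCALL_EXPIRE, so a stalled entry is reclaimed
 * after about 6 * 250 ms = 1.5 seconds.
 */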
  199 
  200 /*
  201  * Bandwidth meter variables and constants
  202  */
  203 static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
  204 /*
  205  * Pending timeouts are stored in a hash table, the key being the
  206  * expiration time. Periodically, the entries are analysed and processed.
  207  */
  208 #define BW_METER_BUCKETS        1024
  209 static VNET_DEFINE(struct bw_meter*, bw_meter_timers[BW_METER_BUCKETS]);
  210 #define V_bw_meter_timers       VNET(bw_meter_timers)
  211 static VNET_DEFINE(struct callout, bw_meter_ch);
  212 #define V_bw_meter_ch           VNET(bw_meter_ch)
  213 #define BW_METER_PERIOD (hz)            /* periodical handling of bw meters */
  214 
  215 /*
  216  * Pending upcalls are stored in a vector which is flushed when
  217  * full, or periodically
  218  */
  219 static VNET_DEFINE(struct bw_upcall, bw_upcalls[BW_UPCALLS_MAX]);
  220 #define V_bw_upcalls            VNET(bw_upcalls)
  221 static VNET_DEFINE(u_int, bw_upcalls_n); /* # of pending upcalls */
  222 #define V_bw_upcalls_n          VNET(bw_upcalls_n)
  223 static VNET_DEFINE(struct callout, bw_upcalls_ch);
  224 #define V_bw_upcalls_ch         VNET(bw_upcalls_ch)
  225 
  226 #define BW_UPCALLS_PERIOD (hz)          /* periodical flush of bw upcalls */
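
/*
 * In other words, pending bandwidth upcalls leave the kernel either
 * when the V_bw_upcalls vector fills up (V_bw_upcalls_n reaches
 * BW_UPCALLS_MAX) or on the periodic callout every BW_UPCALLS_PERIOD
 * ticks, i.e. about once per second, whichever happens first.
 */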
  227 
  228 static VNET_DEFINE(struct pimstat, pimstat);
  229 #define V_pimstat               VNET(pimstat)
  230 
  231 SYSCTL_NODE(_net_inet, IPPROTO_PIM, pim, CTLFLAG_RW, 0, "PIM");
  232 SYSCTL_VNET_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD,
  233     &VNET_NAME(pimstat), pimstat,
  234     "PIM Statistics (struct pimstat, netinet/pim_var.h)");
  235 
  236 static u_long   pim_squelch_wholepkt = 0;
  237 SYSCTL_ULONG(_net_inet_pim, OID_AUTO, squelch_wholepkt, CTLFLAG_RW,
  238     &pim_squelch_wholepkt, 0,
  239     "Disable IGMP_WHOLEPKT notifications if rendezvous point is unspecified");
  240 
  241 extern  struct domain inetdomain;
  242 static const struct protosw in_pim_protosw = {
  243         .pr_type =              SOCK_RAW,
  244         .pr_domain =            &inetdomain,
  245         .pr_protocol =          IPPROTO_PIM,
  246         .pr_flags =             PR_ATOMIC|PR_ADDR|PR_LASTHDR,
  247         .pr_input =             pim_input,
  248         .pr_output =            (pr_output_t*)rip_output,
  249         .pr_ctloutput =         rip_ctloutput,
  250         .pr_usrreqs =           &rip_usrreqs
  251 };
  252 static const struct encaptab *pim_encap_cookie;
  253 
  254 static int pim_encapcheck(const struct mbuf *, int, int, void *);
  255 
  256 /*
  257  * Note: the PIM Register encapsulation adds the following in front of a
  258  * data packet:
  259  *
  260  * struct pim_encap_hdr {
  261  *    struct ip ip;
  262  *    struct pim_encap_pimhdr  pim;
  263  * }
  264  *
  265  */
  266 
  267 struct pim_encap_pimhdr {
  268         struct pim pim;
  269         uint32_t   flags;
  270 };
  271 #define         PIM_ENCAP_TTL   64
  272 
  273 static struct ip pim_encap_iphdr = {
  274 #if BYTE_ORDER == LITTLE_ENDIAN
  275         sizeof(struct ip) >> 2,
  276         IPVERSION,
  277 #else
  278         IPVERSION,
  279         sizeof(struct ip) >> 2,
  280 #endif
  281         0,                      /* tos */
  282         sizeof(struct ip),      /* total length */
  283         0,                      /* id */
  284         0,                      /* frag offset */
  285         PIM_ENCAP_TTL,
  286         IPPROTO_PIM,
  287         0,                      /* checksum */
  288 };
  289 
  290 static struct pim_encap_pimhdr pim_encap_pimhdr = {
  291     {
  292         PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
  293         0,                      /* reserved */
  294         0,                      /* checksum */
  295     },
  296     0                           /* flags */
  297 };
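
/*
 * A sketch of how the templates above are consumed (the real code is
 * in pim_register_send_rp() further down): the original data packet is
 * prepended with a copy of pim_encap_iphdr followed by
 * pim_encap_pimhdr, yielding a PIM Register message addressed to the
 * rendezvous point:
 *
 *      +---------------------------+  copied from pim_encap_iphdr
 *      | outer struct ip           |  (ip_p = IPPROTO_PIM,
 *      |                           |   ip_ttl = PIM_ENCAP_TTL)
 *      +---------------------------+
 *      | struct pim_encap_pimhdr   |  PIM_REGISTER type, flags
 *      +---------------------------+
 *      | original IP data packet   |
 *      +---------------------------+
 *
 * Only the per-packet fields of the outer header (length, addresses,
 * id, checksums) are filled in at send time; everything else comes
 * from these static templates.
 */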
  298 
  299 static VNET_DEFINE(vifi_t, reg_vif_num) = VIFI_INVALID;
  300 #define V_reg_vif_num           VNET(reg_vif_num)
  301 static VNET_DEFINE(struct ifnet, multicast_register_if);
  302 #define V_multicast_register_if VNET(multicast_register_if)
  303 
  304 /*
  305  * Private variables.
  306  */
  307 
  308 static u_long   X_ip_mcast_src(int);
  309 static int      X_ip_mforward(struct ip *, struct ifnet *, struct mbuf *,
  310                     struct ip_moptions *);
  311 static int      X_ip_mrouter_done(void);
  312 static int      X_ip_mrouter_get(struct socket *, struct sockopt *);
  313 static int      X_ip_mrouter_set(struct socket *, struct sockopt *);
  314 static int      X_legal_vif_num(int);
  315 static int      X_mrt_ioctl(u_long, caddr_t, int);
  316 
  317 static int      add_bw_upcall(struct bw_upcall *);
  318 static int      add_mfc(struct mfcctl2 *);
  319 static int      add_vif(struct vifctl *);
  320 static void     bw_meter_prepare_upcall(struct bw_meter *, struct timeval *);
  321 static void     bw_meter_process(void);
  322 static void     bw_meter_receive_packet(struct bw_meter *, int,
  323                     struct timeval *);
  324 static void     bw_upcalls_send(void);
  325 static int      del_bw_upcall(struct bw_upcall *);
  326 static int      del_mfc(struct mfcctl2 *);
  327 static int      del_vif(vifi_t);
  328 static int      del_vif_locked(vifi_t);
  329 static void     expire_bw_meter_process(void *);
  330 static void     expire_bw_upcalls_send(void *);
  331 static void     expire_mfc(struct mfc *);
  332 static void     expire_upcalls(void *);
  333 static void     free_bw_list(struct bw_meter *);
  334 static int      get_sg_cnt(struct sioc_sg_req *);
  335 static int      get_vif_cnt(struct sioc_vif_req *);
  336 static void     if_detached_event(void *, struct ifnet *);
  337 static int      ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
  338 static int      ip_mrouter_init(struct socket *, int);
  339 static __inline struct mfc *
  340                 mfc_find(struct in_addr *, struct in_addr *);
  341 static void     phyint_send(struct ip *, struct vif *, struct mbuf *);
  342 static struct mbuf *
  343                 pim_register_prepare(struct ip *, struct mbuf *);
  344 static int      pim_register_send(struct ip *, struct vif *,
  345                     struct mbuf *, struct mfc *);
  346 static int      pim_register_send_rp(struct ip *, struct vif *,
  347                     struct mbuf *, struct mfc *);
  348 static int      pim_register_send_upcall(struct ip *, struct vif *,
  349                     struct mbuf *, struct mfc *);
  350 static void     schedule_bw_meter(struct bw_meter *, struct timeval *);
  351 static void     send_packet(struct vif *, struct mbuf *);
  352 static int      set_api_config(uint32_t *);
  353 static int      set_assert(int);
  354 static int      socket_send(struct socket *, struct mbuf *,
  355                     struct sockaddr_in *);
  356 static void     unschedule_bw_meter(struct bw_meter *);
  357 
  358 /*
  359  * Kernel multicast forwarding API capabilities and setup.
  360  * If more API capabilities are added to the kernel, they should be
  361  * recorded in `mrt_api_support'.
  362  */
  363 #define MRT_API_VERSION         0x0305
  364 
  365 static const int mrt_api_version = MRT_API_VERSION;
  366 static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
  367                                          MRT_MFC_FLAGS_BORDER_VIF |
  368                                          MRT_MFC_RP |
  369                                          MRT_MFC_BW_UPCALL);
  370 static VNET_DEFINE(uint32_t, mrt_api_config);
  371 #define V_mrt_api_config        VNET(mrt_api_config)
  372 static VNET_DEFINE(int, pim_assert_enabled);
  373 #define V_pim_assert_enabled    VNET(pim_assert_enabled)
  374 static struct timeval pim_assert_interval = { 3, 0 };   /* Rate limit */
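
/*
 * A user-level sketch (not part of this file) of how a routing daemon
 * negotiates these capabilities on its IPPROTO_IGMP raw socket; the
 * kernel side is X_ip_mrouter_get()/X_ip_mrouter_set() and
 * set_api_config() below.  MRT_API_CONFIG must be issued right after
 * MRT_INIT, before any vif or MFC entry is installed, and the handler
 * copies the accepted subset back into the argument:
 *
 *      uint32_t have, want;
 *      socklen_t len = sizeof(have);
 *
 *      getsockopt(fd, IPPROTO_IP, MRT_API_SUPPORT, &have, &len);
 *      want = have & (MRT_MFC_FLAGS_DISABLE_WRONGVIF | MRT_MFC_BW_UPCALL);
 *      setsockopt(fd, IPPROTO_IP, MRT_API_CONFIG, &want, sizeof(want));
 *
 * 'fd' is the daemon's multicast routing socket (see the MRT_INIT
 * sketch after X_ip_mrouter_set() below).
 */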
  375 
  376 /*
  377  * Find a route for a given origin IP address and multicast group address.
  378  * Statistics must be updated by the caller.
  379  */
  380 static __inline struct mfc *
  381 mfc_find(struct in_addr *o, struct in_addr *g)
  382 {
  383         struct mfc *rt;
  384 
  385         MFC_LOCK_ASSERT();
  386 
  387         LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
  388                 if (in_hosteq(rt->mfc_origin, *o) &&
  389                     in_hosteq(rt->mfc_mcastgrp, *g) &&
  390                     TAILQ_EMPTY(&rt->mfc_stall))
  391                         break;
  392         }
  393 
  394         return (rt);
  395 }
  396 
  397 /*
  398  * Handle MRT setsockopt commands to modify the multicast forwarding tables.
  399  */
  400 static int
  401 X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
  402 {
  403     int error, optval;
  404     vifi_t      vifi;
  405     struct      vifctl vifc;
  406     struct      mfcctl2 mfc;
  407     struct      bw_upcall bw_upcall;
  408     uint32_t    i;
  409 
  410     if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
  411         return EPERM;
  412 
  413     error = 0;
  414     switch (sopt->sopt_name) {
  415     case MRT_INIT:
  416         error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
  417         if (error)
  418             break;
  419         error = ip_mrouter_init(so, optval);
  420         break;
  421 
  422     case MRT_DONE:
  423         error = ip_mrouter_done();
  424         break;
  425 
  426     case MRT_ADD_VIF:
  427         error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
  428         if (error)
  429             break;
  430         error = add_vif(&vifc);
  431         break;
  432 
  433     case MRT_DEL_VIF:
  434         error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
  435         if (error)
  436             break;
  437         error = del_vif(vifi);
  438         break;
  439 
  440     case MRT_ADD_MFC:
  441     case MRT_DEL_MFC:
  442         /*
  443          * select data size depending on API version.
  444          */
  445         if (sopt->sopt_name == MRT_ADD_MFC &&
  446                 V_mrt_api_config & MRT_API_FLAGS_ALL) {
  447             error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
  448                                 sizeof(struct mfcctl2));
  449         } else {
  450             error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
  451                                 sizeof(struct mfcctl));
  452             bzero((caddr_t)&mfc + sizeof(struct mfcctl),
  453                         sizeof(mfc) - sizeof(struct mfcctl));
  454         }
  455         if (error)
  456             break;
  457         if (sopt->sopt_name == MRT_ADD_MFC)
  458             error = add_mfc(&mfc);
  459         else
  460             error = del_mfc(&mfc);
  461         break;
  462 
  463     case MRT_ASSERT:
  464         error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
  465         if (error)
  466             break;
  467         set_assert(optval);
  468         break;
  469 
  470     case MRT_API_CONFIG:
  471         error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
  472         if (!error)
  473             error = set_api_config(&i);
  474         if (!error)
  475             error = sooptcopyout(sopt, &i, sizeof i);
  476         break;
  477 
  478     case MRT_ADD_BW_UPCALL:
  479     case MRT_DEL_BW_UPCALL:
  480         error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
  481                                 sizeof bw_upcall);
  482         if (error)
  483             break;
  484         if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
  485             error = add_bw_upcall(&bw_upcall);
  486         else
  487             error = del_bw_upcall(&bw_upcall);
  488         break;
  489 
  490     default:
  491         error = EOPNOTSUPP;
  492         break;
  493     }
  494     return error;
  495 }
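
/*
 * For reference, a user-level sketch of the life cycle that drives the
 * switch above, pieced together from the checks in ip_mrouter_init()
 * and the option handlers (error handling omitted; vifi, out_vifi and
 * the addresses are placeholders):
 *
 *      int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *      int one = 1;
 *      struct vifctl vc = { .vifc_vifi = vifi, .vifc_threshold = 1,
 *                           .vifc_lcl_addr = local_addr };
 *      struct mfcctl mc = { .mfcc_origin = src, .mfcc_mcastgrp = grp,
 *                           .mfcc_parent = vifi };
 *
 *      setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *      setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *      mc.mfcc_ttls[out_vifi] = 1;
 *      setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *      ...
 *      setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 *
 * Everything except MRT_INIT is rejected with EPERM unless it arrives
 * on the socket that issued MRT_INIT (the V_ip_mrouter check above).
 */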
  496 
  497 /*
  498  * Handle MRT getsockopt commands
  499  */
  500 static int
  501 X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
  502 {
  503     int error;
  504 
  505     switch (sopt->sopt_name) {
  506     case MRT_VERSION:
  507         error = sooptcopyout(sopt, &mrt_api_version, sizeof mrt_api_version);
  508         break;
  509 
  510     case MRT_ASSERT:
  511         error = sooptcopyout(sopt, &V_pim_assert_enabled,
  512             sizeof V_pim_assert_enabled);
  513         break;
  514 
  515     case MRT_API_SUPPORT:
  516         error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
  517         break;
  518 
  519     case MRT_API_CONFIG:
  520         error = sooptcopyout(sopt, &V_mrt_api_config, sizeof V_mrt_api_config);
  521         break;
  522 
  523     default:
  524         error = EOPNOTSUPP;
  525         break;
  526     }
  527     return error;
  528 }
  529 
  530 /*
  531  * Handle ioctl commands to obtain information from the cache
  532  */
  533 static int
  534 X_mrt_ioctl(u_long cmd, caddr_t data, int fibnum __unused)
  535 {
  536     int error = 0;
  537 
  538     /*
  539      * Currently the only function calling this ioctl routine is rtioctl().
  540      * Typically, only root can create the raw socket in order to execute
  541      * this ioctl method, however the request might be coming from a prison
  542      */
  543     error = priv_check(curthread, PRIV_NETINET_MROUTE);
  544     if (error)
  545         return (error);
  546     switch (cmd) {
  547     case (SIOCGETVIFCNT):
  548         error = get_vif_cnt((struct sioc_vif_req *)data);
  549         break;
  550 
  551     case (SIOCGETSGCNT):
  552         error = get_sg_cnt((struct sioc_sg_req *)data);
  553         break;
  554 
  555     default:
  556         error = EINVAL;
  557         break;
  558     }
  559     return error;
  560 }
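
/*
 * A user-level sketch of the two requests handled above, issued with
 * ioctl(2) (mrouted and netstat traditionally use their IP socket for
 * this; field names are from netinet/ip_mroute.h):
 *
 *      struct sioc_sg_req sg;
 *      struct sioc_vif_req vr;
 *
 *      sg.src = source_addr;                // (S,G) whose counters we want
 *      sg.grp = group_addr;
 *      if (ioctl(fd, SIOCGETSGCNT, &sg) == 0)
 *              printf("%lu pkts %lu bytes %lu wrong-if\n",
 *                  sg.pktcnt, sg.bytecnt, sg.wrong_if);
 *
 *      vr.vifi = 0;                         // per-vif packet/byte counters
 *      if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
 *              printf("in %lu out %lu\n", vr.icount, vr.ocount);
 */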
  561 
  562 /*
  563  * returns the packet, byte, rpf-failure count for the source group provided
  564  */
  565 static int
  566 get_sg_cnt(struct sioc_sg_req *req)
  567 {
  568     struct mfc *rt;
  569 
  570     MFC_LOCK();
  571     rt = mfc_find(&req->src, &req->grp);
  572     if (rt == NULL) {
  573         MFC_UNLOCK();
  574         req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
  575         return EADDRNOTAVAIL;
  576     }
  577     req->pktcnt = rt->mfc_pkt_cnt;
  578     req->bytecnt = rt->mfc_byte_cnt;
  579     req->wrong_if = rt->mfc_wrong_if;
  580     MFC_UNLOCK();
  581     return 0;
  582 }
  583 
  584 /*
  585  * returns the input and output packet and byte counts on the vif provided
  586  */
  587 static int
  588 get_vif_cnt(struct sioc_vif_req *req)
  589 {
  590     vifi_t vifi = req->vifi;
  591 
  592     VIF_LOCK();
  593     if (vifi >= V_numvifs) {
  594         VIF_UNLOCK();
  595         return EINVAL;
  596     }
  597 
  598     req->icount = V_viftable[vifi].v_pkt_in;
  599     req->ocount = V_viftable[vifi].v_pkt_out;
  600     req->ibytes = V_viftable[vifi].v_bytes_in;
  601     req->obytes = V_viftable[vifi].v_bytes_out;
  602     VIF_UNLOCK();
  603 
  604     return 0;
  605 }
  606 
  607 static void
  608 if_detached_event(void *arg __unused, struct ifnet *ifp)
  609 {
  610     vifi_t vifi;
  611     int i;
  612 
  613     MROUTER_LOCK();
  614 
  615     if (V_ip_mrouter == NULL) {
  616         MROUTER_UNLOCK();
  617         return;
  618     }
  619 
  620     VIF_LOCK();
  621     MFC_LOCK();
  622 
  623     /*
  624      * Tear down multicast forwarder state associated with this ifnet.
  625      * 1. Walk the vif list, matching vifs against this ifnet.
  626      * 2. Walk the multicast forwarding cache (mfc) looking for
  627      *    inner matches with this vif's index.
  628      * 3. Expire any matching multicast forwarding cache entries.
  629      * 4. Free vif state. This should disable ALLMULTI on the interface.
  630      */
  631     for (vifi = 0; vifi < V_numvifs; vifi++) {
  632         if (V_viftable[vifi].v_ifp != ifp)
  633                 continue;
  634         for (i = 0; i < mfchashsize; i++) {
  635                 struct mfc *rt, *nrt;
  636                 for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
  637                         nrt = LIST_NEXT(rt, mfc_hash);
  638                         if (rt->mfc_parent == vifi) {
  639                                 expire_mfc(rt);
  640                         }
  641                 }
  642         }
  643         del_vif_locked(vifi);
  644     }
  645 
  646     MFC_UNLOCK();
  647     VIF_UNLOCK();
  648 
  649     MROUTER_UNLOCK();
  650 }
  651                         
  652 /*
  653  * Enable multicast forwarding.
  654  */
  655 static int
  656 ip_mrouter_init(struct socket *so, int version)
  657 {
  658 
  659     CTR3(KTR_IPMF, "%s: so_type %d, pr_protocol %d", __func__,
  660         so->so_type, so->so_proto->pr_protocol);
  661 
  662     if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
  663         return EOPNOTSUPP;
  664 
  665     if (version != 1)
  666         return ENOPROTOOPT;
  667 
  668     MROUTER_LOCK();
  669 
  670     if (ip_mrouter_unloading) {
  671         MROUTER_UNLOCK();
  672         return ENOPROTOOPT;
  673     }
  674 
  675     if (V_ip_mrouter != NULL) {
  676         MROUTER_UNLOCK();
  677         return EADDRINUSE;
  678     }
  679 
  680     V_mfchashtbl = hashinit_flags(mfchashsize, M_MRTABLE, &V_mfchash,
  681         HASH_NOWAIT);
  682 
  683     callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
  684         curvnet);
  685     callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
  686         curvnet);
  687     callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
  688         curvnet);
  689 
  690     V_ip_mrouter = so;
  691     ip_mrouter_cnt++;
  692 
  693     MROUTER_UNLOCK();
  694 
  695     CTR1(KTR_IPMF, "%s: done", __func__);
  696 
  697     return 0;
  698 }
  699 
  700 /*
  701  * Disable multicast forwarding.
  702  */
  703 static int
  704 X_ip_mrouter_done(void)
  705 {
  706     vifi_t vifi;
  707     int i;
  708     struct ifnet *ifp;
  709     struct ifreq ifr;
  710 
  711     MROUTER_LOCK();
  712 
  713     if (V_ip_mrouter == NULL) {
  714         MROUTER_UNLOCK();
  715         return EINVAL;
  716     }
  717 
  718     /*
   719      * Detach/disable hooks to the rest of the system.
  720      */
  721     V_ip_mrouter = NULL;
  722     ip_mrouter_cnt--;
  723     V_mrt_api_config = 0;
  724 
  725     VIF_LOCK();
  726 
  727     /*
  728      * For each phyint in use, disable promiscuous reception of all IP
  729      * multicasts.
  730      */
  731     for (vifi = 0; vifi < V_numvifs; vifi++) {
  732         if (!in_nullhost(V_viftable[vifi].v_lcl_addr) &&
  733                 !(V_viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
  734             struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);
  735 
  736             so->sin_len = sizeof(struct sockaddr_in);
  737             so->sin_family = AF_INET;
  738             so->sin_addr.s_addr = INADDR_ANY;
  739             ifp = V_viftable[vifi].v_ifp;
  740             if_allmulti(ifp, 0);
  741         }
  742     }
  743     bzero((caddr_t)V_viftable, sizeof(V_viftable));
  744     V_numvifs = 0;
  745     V_pim_assert_enabled = 0;
  746     
  747     VIF_UNLOCK();
  748 
  749     callout_stop(&V_expire_upcalls_ch);
  750     callout_stop(&V_bw_upcalls_ch);
  751     callout_stop(&V_bw_meter_ch);
  752 
  753     MFC_LOCK();
  754 
  755     /*
  756      * Free all multicast forwarding cache entries.
  757      * Do not use hashdestroy(), as we must perform other cleanup.
  758      */
  759     for (i = 0; i < mfchashsize; i++) {
  760         struct mfc *rt, *nrt;
  761         for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
  762                 nrt = LIST_NEXT(rt, mfc_hash);
  763                 expire_mfc(rt);
  764         }
  765     }
  766     free(V_mfchashtbl, M_MRTABLE);
  767     V_mfchashtbl = NULL;
  768 
  769     bzero(V_nexpire, sizeof(V_nexpire[0]) * mfchashsize);
  770 
  771     V_bw_upcalls_n = 0;
  772     bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
  773 
  774     MFC_UNLOCK();
  775 
  776     V_reg_vif_num = VIFI_INVALID;
  777 
  778     MROUTER_UNLOCK();
  779 
  780     CTR1(KTR_IPMF, "%s: done", __func__);
  781 
  782     return 0;
  783 }
  784 
  785 /*
  786  * Set PIM assert processing global
  787  */
  788 static int
  789 set_assert(int i)
  790 {
  791     if ((i != 1) && (i != 0))
  792         return EINVAL;
  793 
  794     V_pim_assert_enabled = i;
  795 
  796     return 0;
  797 }
  798 
  799 /*
  800  * Configure API capabilities
  801  */
  802 int
  803 set_api_config(uint32_t *apival)
  804 {
  805     int i;
  806 
  807     /*
  808      * We can set the API capabilities only if it is the first operation
  809      * after MRT_INIT. I.e.:
  810      *  - there are no vifs installed
  811      *  - pim_assert is not enabled
  812      *  - the MFC table is empty
  813      */
  814     if (V_numvifs > 0) {
  815         *apival = 0;
  816         return EPERM;
  817     }
  818     if (V_pim_assert_enabled) {
  819         *apival = 0;
  820         return EPERM;
  821     }
  822 
  823     MFC_LOCK();
  824 
   825     for (i = 0; i < mfchashsize; i++) {
   826         if (LIST_FIRST(&V_mfchashtbl[i]) != NULL) {
   827             *apival = 0;
                   MFC_UNLOCK();       /* don't return with the MFC lock held */
   828             return EPERM;
   829         }
   830     }
  831 
  832     MFC_UNLOCK();
  833 
  834     V_mrt_api_config = *apival & mrt_api_support;
  835     *apival = V_mrt_api_config;
  836 
  837     return 0;
  838 }
  839 
  840 /*
  841  * Add a vif to the vif table
  842  */
  843 static int
  844 add_vif(struct vifctl *vifcp)
  845 {
  846     struct vif *vifp = V_viftable + vifcp->vifc_vifi;
  847     struct sockaddr_in sin = {sizeof sin, AF_INET};
  848     struct ifaddr *ifa;
  849     struct ifnet *ifp;
  850     int error;
  851 
  852     VIF_LOCK();
  853     if (vifcp->vifc_vifi >= MAXVIFS) {
  854         VIF_UNLOCK();
  855         return EINVAL;
  856     }
  857     /* rate limiting is no longer supported by this code */
  858     if (vifcp->vifc_rate_limit != 0) {
  859         log(LOG_ERR, "rate limiting is no longer supported\n");
  860         VIF_UNLOCK();
  861         return EINVAL;
  862     }
  863     if (!in_nullhost(vifp->v_lcl_addr)) {
  864         VIF_UNLOCK();
  865         return EADDRINUSE;
  866     }
  867     if (in_nullhost(vifcp->vifc_lcl_addr)) {
  868         VIF_UNLOCK();
  869         return EADDRNOTAVAIL;
  870     }
  871 
  872     /* Find the interface with an address in AF_INET family */
  873     if (vifcp->vifc_flags & VIFF_REGISTER) {
  874         /*
  875          * XXX: Because VIFF_REGISTER does not really need a valid
  876          * local interface (e.g. it could be 127.0.0.2), we don't
  877          * check its address.
  878          */
  879         ifp = NULL;
  880     } else {
  881         sin.sin_addr = vifcp->vifc_lcl_addr;
  882         ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
  883         if (ifa == NULL) {
  884             VIF_UNLOCK();
  885             return EADDRNOTAVAIL;
  886         }
  887         ifp = ifa->ifa_ifp;
  888         ifa_free(ifa);
  889     }
  890 
  891     if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) {
  892         CTR1(KTR_IPMF, "%s: tunnels are no longer supported", __func__);
  893         VIF_UNLOCK();
  894         return EOPNOTSUPP;
  895     } else if (vifcp->vifc_flags & VIFF_REGISTER) {
  896         ifp = &V_multicast_register_if;
  897         CTR2(KTR_IPMF, "%s: add register vif for ifp %p", __func__, ifp);
  898         if (V_reg_vif_num == VIFI_INVALID) {
  899             if_initname(&V_multicast_register_if, "register_vif", 0);
  900             V_multicast_register_if.if_flags = IFF_LOOPBACK;
  901             V_reg_vif_num = vifcp->vifc_vifi;
  902         }
  903     } else {            /* Make sure the interface supports multicast */
  904         if ((ifp->if_flags & IFF_MULTICAST) == 0) {
  905             VIF_UNLOCK();
  906             return EOPNOTSUPP;
  907         }
  908 
  909         /* Enable promiscuous reception of all IP multicasts from the if */
  910         error = if_allmulti(ifp, 1);
  911         if (error) {
  912             VIF_UNLOCK();
  913             return error;
  914         }
  915     }
  916 
  917     vifp->v_flags     = vifcp->vifc_flags;
  918     vifp->v_threshold = vifcp->vifc_threshold;
  919     vifp->v_lcl_addr  = vifcp->vifc_lcl_addr;
  920     vifp->v_rmt_addr  = vifcp->vifc_rmt_addr;
  921     vifp->v_ifp       = ifp;
  922     /* initialize per vif pkt counters */
  923     vifp->v_pkt_in    = 0;
  924     vifp->v_pkt_out   = 0;
  925     vifp->v_bytes_in  = 0;
  926     vifp->v_bytes_out = 0;
  927     bzero(&vifp->v_route, sizeof(vifp->v_route));
  928 
  929     /* Adjust numvifs up if the vifi is higher than numvifs */
  930     if (V_numvifs <= vifcp->vifc_vifi)
  931         V_numvifs = vifcp->vifc_vifi + 1;
  932 
  933     VIF_UNLOCK();
  934 
  935     CTR4(KTR_IPMF, "%s: add vif %d laddr %s thresh %x", __func__,
  936         (int)vifcp->vifc_vifi, inet_ntoa(vifcp->vifc_lcl_addr),
  937         (int)vifcp->vifc_threshold);
  938 
  939     return 0;
  940 }
  941 
  942 /*
  943  * Delete a vif from the vif table
  944  */
  945 static int
  946 del_vif_locked(vifi_t vifi)
  947 {
  948     struct vif *vifp;
  949 
  950     VIF_LOCK_ASSERT();
  951 
  952     if (vifi >= V_numvifs) {
  953         return EINVAL;
  954     }
  955     vifp = &V_viftable[vifi];
  956     if (in_nullhost(vifp->v_lcl_addr)) {
  957         return EADDRNOTAVAIL;
  958     }
  959 
  960     if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
  961         if_allmulti(vifp->v_ifp, 0);
  962 
  963     if (vifp->v_flags & VIFF_REGISTER)
  964         V_reg_vif_num = VIFI_INVALID;
  965 
  966     bzero((caddr_t)vifp, sizeof (*vifp));
  967 
  968     CTR2(KTR_IPMF, "%s: delete vif %d", __func__, (int)vifi);
  969 
  970     /* Adjust numvifs down */
  971     for (vifi = V_numvifs; vifi > 0; vifi--)
  972         if (!in_nullhost(V_viftable[vifi-1].v_lcl_addr))
  973             break;
  974     V_numvifs = vifi;
  975 
  976     return 0;
  977 }
  978 
  979 static int
  980 del_vif(vifi_t vifi)
  981 {
  982     int cc;
  983 
  984     VIF_LOCK();
  985     cc = del_vif_locked(vifi);
  986     VIF_UNLOCK();
  987 
  988     return cc;
  989 }
  990 
  991 /*
  992  * update an mfc entry without resetting counters and S,G addresses.
  993  */
  994 static void
  995 update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
  996 {
  997     int i;
  998 
  999     rt->mfc_parent = mfccp->mfcc_parent;
 1000     for (i = 0; i < V_numvifs; i++) {
 1001         rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
 1002         rt->mfc_flags[i] = mfccp->mfcc_flags[i] & V_mrt_api_config &
 1003             MRT_MFC_FLAGS_ALL;
 1004     }
 1005     /* set the RP address */
 1006     if (V_mrt_api_config & MRT_MFC_RP)
 1007         rt->mfc_rp = mfccp->mfcc_rp;
 1008     else
 1009         rt->mfc_rp.s_addr = INADDR_ANY;
 1010 }
 1011 
 1012 /*
 1013  * fully initialize an mfc entry from the parameter.
 1014  */
 1015 static void
 1016 init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
 1017 {
 1018     rt->mfc_origin     = mfccp->mfcc_origin;
 1019     rt->mfc_mcastgrp   = mfccp->mfcc_mcastgrp;
 1020 
 1021     update_mfc_params(rt, mfccp);
 1022 
 1023     /* initialize pkt counters per src-grp */
 1024     rt->mfc_pkt_cnt    = 0;
 1025     rt->mfc_byte_cnt   = 0;
 1026     rt->mfc_wrong_if   = 0;
 1027     timevalclear(&rt->mfc_last_assert);
 1028 }
 1029 
 1030 static void
 1031 expire_mfc(struct mfc *rt)
 1032 {
 1033         struct rtdetq *rte, *nrte;
 1034 
 1035         MFC_LOCK_ASSERT();
 1036 
 1037         free_bw_list(rt->mfc_bw_meter);
 1038 
 1039         TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
 1040                 m_freem(rte->m);
 1041                 TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
 1042                 free(rte, M_MRTABLE);
 1043         }
 1044 
 1045         LIST_REMOVE(rt, mfc_hash);
 1046         free(rt, M_MRTABLE);
 1047 }
 1048 
 1049 /*
 1050  * Add an mfc entry
 1051  */
 1052 static int
 1053 add_mfc(struct mfcctl2 *mfccp)
 1054 {
 1055     struct mfc *rt;
 1056     struct rtdetq *rte, *nrte;
 1057     u_long hash = 0;
 1058     u_short nstl;
 1059 
 1060     VIF_LOCK();
 1061     MFC_LOCK();
 1062 
 1063     rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
 1064 
 1065     /* If an entry already exists, just update the fields */
 1066     if (rt) {
 1067         CTR4(KTR_IPMF, "%s: update mfc orig %s group %lx parent %x",
 1068             __func__, inet_ntoa(mfccp->mfcc_origin),
 1069             (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
 1070             mfccp->mfcc_parent);
 1071         update_mfc_params(rt, mfccp);
 1072         MFC_UNLOCK();
 1073         VIF_UNLOCK();
 1074         return (0);
 1075     }
 1076 
 1077     /*
 1078      * Find the entry for which the upcall was made and update
 1079      */
 1080     nstl = 0;
 1081     hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
 1082     LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1083         if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
 1084             in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
 1085             !TAILQ_EMPTY(&rt->mfc_stall)) {
 1086                 CTR5(KTR_IPMF,
 1087                     "%s: add mfc orig %s group %lx parent %x qh %p",
 1088                     __func__, inet_ntoa(mfccp->mfcc_origin),
 1089                     (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
 1090                     mfccp->mfcc_parent,
 1091                     TAILQ_FIRST(&rt->mfc_stall));
 1092                 if (nstl++)
 1093                         CTR1(KTR_IPMF, "%s: multiple matches", __func__);
 1094 
 1095                 init_mfc_params(rt, mfccp);
 1096                 rt->mfc_expire = 0;     /* Don't clean this guy up */
 1097                 V_nexpire[hash]--;
 1098 
 1099                 /* Free queued packets, but attempt to forward them first. */
 1100                 TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
 1101                         if (rte->ifp != NULL)
 1102                                 ip_mdq(rte->m, rte->ifp, rt, -1);
 1103                         m_freem(rte->m);
 1104                         TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
 1105                         rt->mfc_nstall--;
 1106                         free(rte, M_MRTABLE);
 1107                 }
 1108         }
 1109     }
 1110 
 1111     /*
 1112      * It is possible that an entry is being inserted without an upcall
 1113      */
 1114     if (nstl == 0) {
 1115         CTR1(KTR_IPMF, "%s: adding mfc w/o upcall", __func__);
 1116         LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1117                 if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
 1118                     in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
 1119                         init_mfc_params(rt, mfccp);
 1120                         if (rt->mfc_expire)
 1121                             V_nexpire[hash]--;
 1122                         rt->mfc_expire = 0;
 1123                         break; /* XXX */
 1124                 }
 1125         }
 1126 
 1127         if (rt == NULL) {               /* no upcall, so make a new entry */
 1128             rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
 1129             if (rt == NULL) {
 1130                 MFC_UNLOCK();
 1131                 VIF_UNLOCK();
 1132                 return (ENOBUFS);
 1133             }
 1134 
 1135             init_mfc_params(rt, mfccp);
 1136             TAILQ_INIT(&rt->mfc_stall);
 1137             rt->mfc_nstall = 0;
 1138 
 1139             rt->mfc_expire     = 0;
 1140             rt->mfc_bw_meter = NULL;
 1141 
 1142             /* insert new entry at head of hash chain */
 1143             LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
 1144         }
 1145     }
 1146 
 1147     MFC_UNLOCK();
 1148     VIF_UNLOCK();
 1149 
 1150     return (0);
 1151 }
 1152 
 1153 /*
 1154  * Delete an mfc entry
 1155  */
 1156 static int
 1157 del_mfc(struct mfcctl2 *mfccp)
 1158 {
 1159     struct in_addr      origin;
 1160     struct in_addr      mcastgrp;
 1161     struct mfc          *rt;
 1162 
 1163     origin = mfccp->mfcc_origin;
 1164     mcastgrp = mfccp->mfcc_mcastgrp;
 1165 
 1166     CTR3(KTR_IPMF, "%s: delete mfc orig %s group %lx", __func__,
 1167         inet_ntoa(origin), (u_long)ntohl(mcastgrp.s_addr));
 1168 
 1169     MFC_LOCK();
 1170 
 1171     rt = mfc_find(&origin, &mcastgrp);
 1172     if (rt == NULL) {
 1173         MFC_UNLOCK();
 1174         return EADDRNOTAVAIL;
 1175     }
 1176 
 1177     /*
 1178      * free the bw_meter entries
 1179      */
 1180     free_bw_list(rt->mfc_bw_meter);
 1181     rt->mfc_bw_meter = NULL;
 1182 
 1183     LIST_REMOVE(rt, mfc_hash);
 1184     free(rt, M_MRTABLE);
 1185 
 1186     MFC_UNLOCK();
 1187 
 1188     return (0);
 1189 }
 1190 
 1191 /*
 1192  * Send a message to the routing daemon on the multicast routing socket.
 1193  */
 1194 static int
 1195 socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
 1196 {
 1197     if (s) {
 1198         SOCKBUF_LOCK(&s->so_rcv);
 1199         if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
 1200             NULL) != 0) {
 1201             sorwakeup_locked(s);
 1202             return 0;
 1203         }
 1204         SOCKBUF_UNLOCK(&s->so_rcv);
 1205     }
 1206     m_freem(mm);
 1207     return -1;
 1208 }
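
/*
 * The messages queued here are read by the routing daemon from the
 * same IGMP socket it used for MRT_INIT.  A user-level sketch of the
 * receive side (struct igmpmsg is in netinet/ip_mroute.h; the upcalls
 * are built in X_ip_mforward() and ip_mdq() below):
 *
 *      char buf[2048];
 *      struct igmpmsg *im = (struct igmpmsg *)buf;
 *      ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *
 *      if (n >= (ssize_t)sizeof(*im) && im->im_mbz == 0) {
 *              if (im->im_msgtype == IGMPMSG_NOCACHE) {
 *                      // install an MFC entry for (im->im_src, im->im_dst)
 *                      // arriving on vif im->im_vif
 *              } else if (im->im_msgtype == IGMPMSG_WRONGVIF) {
 *                      // PIM assert processing
 *              }
 *      }
 *
 * im_mbz lines up with the IP protocol field of a genuine packet, so a
 * zero there is what lets the daemon tell kernel upcalls apart from
 * IGMP traffic arriving on the same raw socket.
 */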
 1209 
 1210 /*
 1211  * IP multicast forwarding function. This function assumes that the packet
 1212  * pointed to by "ip" has arrived on (or is about to be sent to) the interface
 1213  * pointed to by "ifp", and the packet is to be relayed to other networks
 1214  * that have members of the packet's destination IP multicast group.
 1215  *
 1216  * The packet is returned unscathed to the caller, unless it is
 1217  * erroneous, in which case a non-zero return value tells the caller to
 1218  * discard it.
 1219  */
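
/*
 * A sketch of the calling convention just described.  X_ip_mforward()
 * is reached through the ip_mforward hook that this module installs;
 * the receive path in netinet/ip_input.c does, in essence:
 *
 *      if (ip_mforward && ip_mforward(ip, ifp, m, NULL) != 0) {
 *              // erroneous packet: the forwarder told us to drop it
 *              IPSTAT_INC(ips_cantforward);
 *              m_freem(m);
 *              return;
 *      }
 *      // otherwise the mbuf is returned unscathed and local delivery
 *      // of the multicast datagram continues as usual
 */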
 1220 
 1221 #define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */
 1222 
 1223 static int
 1224 X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
 1225     struct ip_moptions *imo)
 1226 {
 1227     struct mfc *rt;
 1228     int error;
 1229     vifi_t vifi;
 1230 
  1231     CTR3(KTR_IPMF, "ip_mforward: forward packet orig %s group %lx ifp %p",
 1232         inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr), ifp);
 1233 
 1234     if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
 1235                 ((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
 1236         /*
 1237          * Packet arrived via a physical interface or
 1238          * an encapsulated tunnel or a register_vif.
 1239          */
 1240     } else {
 1241         /*
 1242          * Packet arrived through a source-route tunnel.
 1243          * Source-route tunnels are no longer supported.
 1244          */
 1245         return (1);
 1246     }
 1247 
 1248     VIF_LOCK();
 1249     MFC_LOCK();
 1250     if (imo && ((vifi = imo->imo_multicast_vif) < V_numvifs)) {
 1251         if (ip->ip_ttl < MAXTTL)
 1252             ip->ip_ttl++;       /* compensate for -1 in *_send routines */
 1253         error = ip_mdq(m, ifp, NULL, vifi);
 1254         MFC_UNLOCK();
 1255         VIF_UNLOCK();
 1256         return error;
 1257     }
 1258 
 1259     /*
 1260      * Don't forward a packet with time-to-live of zero or one,
 1261      * or a packet destined to a local-only group.
 1262      */
 1263     if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ntohl(ip->ip_dst.s_addr))) {
 1264         MFC_UNLOCK();
 1265         VIF_UNLOCK();
 1266         return 0;
 1267     }
 1268 
 1269     /*
 1270      * Determine forwarding vifs from the forwarding cache table
 1271      */
 1272     MRTSTAT_INC(mrts_mfc_lookups);
 1273     rt = mfc_find(&ip->ip_src, &ip->ip_dst);
 1274 
 1275     /* Entry exists, so forward if necessary */
 1276     if (rt != NULL) {
 1277         error = ip_mdq(m, ifp, rt, -1);
 1278         MFC_UNLOCK();
 1279         VIF_UNLOCK();
 1280         return error;
 1281     } else {
 1282         /*
 1283          * If we don't have a route for packet's origin,
 1284          * Make a copy of the packet & send message to routing daemon
 1285          */
 1286 
 1287         struct mbuf *mb0;
 1288         struct rtdetq *rte;
 1289         u_long hash;
 1290         int hlen = ip->ip_hl << 2;
 1291 
 1292         MRTSTAT_INC(mrts_mfc_misses);
 1293         MRTSTAT_INC(mrts_no_route);
 1294         CTR2(KTR_IPMF, "ip_mforward: no mfc for (%s,%lx)",
 1295             inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr));
 1296 
 1297         /*
 1298          * Allocate mbufs early so that we don't do extra work if we are
 1299          * just going to fail anyway.  Make sure to pullup the header so
 1300          * that other people can't step on it.
 1301          */
 1302         rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE,
 1303             M_NOWAIT|M_ZERO);
 1304         if (rte == NULL) {
 1305             MFC_UNLOCK();
 1306             VIF_UNLOCK();
 1307             return ENOBUFS;
 1308         }
 1309 
 1310         mb0 = m_copypacket(m, M_DONTWAIT);
 1311         if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
 1312             mb0 = m_pullup(mb0, hlen);
 1313         if (mb0 == NULL) {
 1314             free(rte, M_MRTABLE);
 1315             MFC_UNLOCK();
 1316             VIF_UNLOCK();
 1317             return ENOBUFS;
 1318         }
 1319 
 1320         /* is there an upcall waiting for this flow ? */
 1321         hash = MFCHASH(ip->ip_src, ip->ip_dst);
 1322         LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
 1323                 if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
 1324                     in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
 1325                     !TAILQ_EMPTY(&rt->mfc_stall))
 1326                         break;
 1327         }
 1328 
 1329         if (rt == NULL) {
 1330             int i;
 1331             struct igmpmsg *im;
 1332             struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 1333             struct mbuf *mm;
 1334 
 1335             /*
 1336              * Locate the vifi for the incoming interface for this packet.
 1337              * If none found, drop packet.
 1338              */
 1339             for (vifi = 0; vifi < V_numvifs &&
 1340                     V_viftable[vifi].v_ifp != ifp; vifi++)
 1341                 ;
 1342             if (vifi >= V_numvifs)      /* vif not found, drop packet */
 1343                 goto non_fatal;
 1344 
 1345             /* no upcall, so make a new entry */
 1346             rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
 1347             if (rt == NULL)
 1348                 goto fail;
 1349 
 1350             /* Make a copy of the header to send to the user level process */
 1351             mm = m_copy(mb0, 0, hlen);
 1352             if (mm == NULL)
 1353                 goto fail1;
 1354 
 1355             /*
 1356              * Send message to routing daemon to install
 1357              * a route into the kernel table
 1358              */
 1359 
 1360             im = mtod(mm, struct igmpmsg *);
 1361             im->im_msgtype = IGMPMSG_NOCACHE;
 1362             im->im_mbz = 0;
 1363             im->im_vif = vifi;
 1364 
 1365             MRTSTAT_INC(mrts_upcalls);
 1366 
 1367             k_igmpsrc.sin_addr = ip->ip_src;
 1368             if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
 1369                 CTR0(KTR_IPMF, "ip_mforward: socket queue full");
 1370                 MRTSTAT_INC(mrts_upq_sockfull);
 1371 fail1:
 1372                 free(rt, M_MRTABLE);
 1373 fail:
 1374                 free(rte, M_MRTABLE);
 1375                 m_freem(mb0);
 1376                 MFC_UNLOCK();
 1377                 VIF_UNLOCK();
 1378                 return ENOBUFS;
 1379             }
 1380 
 1381             /* insert new entry at head of hash chain */
 1382             rt->mfc_origin.s_addr     = ip->ip_src.s_addr;
 1383             rt->mfc_mcastgrp.s_addr   = ip->ip_dst.s_addr;
 1384             rt->mfc_expire            = UPCALL_EXPIRE;
 1385             V_nexpire[hash]++;
 1386             for (i = 0; i < V_numvifs; i++) {
 1387                 rt->mfc_ttls[i] = 0;
 1388                 rt->mfc_flags[i] = 0;
 1389             }
 1390             rt->mfc_parent = -1;
 1391 
 1392             /* clear the RP address */
 1393             rt->mfc_rp.s_addr = INADDR_ANY;
 1394             rt->mfc_bw_meter = NULL;
 1395 
 1396             /* initialize pkt counters per src-grp */
 1397             rt->mfc_pkt_cnt = 0;
 1398             rt->mfc_byte_cnt = 0;
 1399             rt->mfc_wrong_if = 0;
 1400             timevalclear(&rt->mfc_last_assert);
 1401 
 1402             TAILQ_INIT(&rt->mfc_stall);
 1403             rt->mfc_nstall = 0;
 1404 
 1405             /* link into table */
 1406             LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
 1407             TAILQ_INSERT_HEAD(&rt->mfc_stall, rte, rte_link);
 1408             rt->mfc_nstall++;
 1409 
 1410         } else {
 1411             /* determine if queue has overflowed */
 1412             if (rt->mfc_nstall > MAX_UPQ) {
 1413                 MRTSTAT_INC(mrts_upq_ovflw);
 1414 non_fatal:
 1415                 free(rte, M_MRTABLE);
 1416                 m_freem(mb0);
 1417                 MFC_UNLOCK();
 1418                 VIF_UNLOCK();
 1419                 return (0);
 1420             }
 1421             TAILQ_INSERT_TAIL(&rt->mfc_stall, rte, rte_link);
 1422             rt->mfc_nstall++;
 1423         }
 1424 
 1425         rte->m                  = mb0;
 1426         rte->ifp                = ifp;
 1427 
 1428         MFC_UNLOCK();
 1429         VIF_UNLOCK();
 1430 
 1431         return 0;
 1432     }
 1433 }
 1434 
 1435 /*
 1436  * Clean up the cache entry if upcall is not serviced
 1437  */
 1438 static void
 1439 expire_upcalls(void *arg)
 1440 {
 1441     int i;
 1442 
 1443     CURVNET_SET((struct vnet *) arg);
 1444 
 1445     MFC_LOCK();
 1446 
 1447     for (i = 0; i < mfchashsize; i++) {
 1448         struct mfc *rt, *nrt;
 1449 
 1450         if (V_nexpire[i] == 0)
 1451             continue;
 1452 
 1453         for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
 1454                 nrt = LIST_NEXT(rt, mfc_hash);
 1455 
 1456                 if (TAILQ_EMPTY(&rt->mfc_stall))
 1457                         continue;
 1458 
 1459                 if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
 1460                         continue;
 1461 
 1462                 /*
 1463                  * free the bw_meter entries
 1464                  */
 1465                 while (rt->mfc_bw_meter != NULL) {
 1466                     struct bw_meter *x = rt->mfc_bw_meter;
 1467 
 1468                     rt->mfc_bw_meter = x->bm_mfc_next;
 1469                     free(x, M_BWMETER);
 1470                 }
 1471 
 1472                 MRTSTAT_INC(mrts_cache_cleanups);
 1473                 CTR3(KTR_IPMF, "%s: expire (%lx, %lx)", __func__,
 1474                     (u_long)ntohl(rt->mfc_origin.s_addr),
 1475                     (u_long)ntohl(rt->mfc_mcastgrp.s_addr));
 1476 
 1477                 expire_mfc(rt);
 1478             }
 1479     }
 1480 
 1481     MFC_UNLOCK();
 1482 
 1483     callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
 1484         curvnet);
 1485 
 1486     CURVNET_RESTORE();
 1487 }
 1488 
 1489 /*
 1490  * Packet forwarding routine once entry in the cache is made
 1491  */
 1492 static int
 1493 ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
 1494 {
 1495     struct ip  *ip = mtod(m, struct ip *);
 1496     vifi_t vifi;
 1497     int plen = ip->ip_len;
 1498 
 1499     VIF_LOCK_ASSERT();
 1500 
 1501     /*
 1502      * If xmt_vif is not -1, send on only the requested vif.
 1503      *
 1504      * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
 1505      */
 1506     if (xmt_vif < V_numvifs) {
 1507         if (V_viftable[xmt_vif].v_flags & VIFF_REGISTER)
 1508                 pim_register_send(ip, V_viftable + xmt_vif, m, rt);
 1509         else
 1510                 phyint_send(ip, V_viftable + xmt_vif, m);
 1511         return 1;
 1512     }
 1513 
 1514     /*
 1515      * Don't forward if it didn't arrive from the parent vif for its origin.
 1516      */
 1517     vifi = rt->mfc_parent;
 1518     if ((vifi >= V_numvifs) || (V_viftable[vifi].v_ifp != ifp)) {
 1519         CTR4(KTR_IPMF, "%s: rx on wrong ifp %p (vifi %d, v_ifp %p)",
 1520             __func__, ifp, (int)vifi, V_viftable[vifi].v_ifp);
 1521         MRTSTAT_INC(mrts_wrong_if);
 1522         ++rt->mfc_wrong_if;
 1523         /*
 1524          * If we are doing PIM assert processing, send a message
 1525          * to the routing daemon.
 1526          *
 1527          * XXX: A PIM-SM router needs the WRONGVIF detection so it
 1528          * can complete the SPT switch, regardless of the type
 1529          * of the iif (broadcast media, GRE tunnel, etc).
 1530          */
 1531         if (V_pim_assert_enabled && (vifi < V_numvifs) &&
 1532             V_viftable[vifi].v_ifp) {
 1533 
 1534             if (ifp == &V_multicast_register_if)
 1535                 PIMSTAT_INC(pims_rcv_registers_wrongiif);
 1536 
 1537             /* Get vifi for the incoming packet */
 1538             for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp;
 1539                 vifi++)
 1540                 ;
 1541             if (vifi >= V_numvifs)
 1542                 return 0;       /* The iif is not found: ignore the packet. */
 1543 
 1544             if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
 1545                 return 0;       /* WRONGVIF disabled: ignore the packet */
 1546 
 1547             if (ratecheck(&rt->mfc_last_assert, &pim_assert_interval)) {
 1548                 struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 1549                 struct igmpmsg *im;
 1550                 int hlen = ip->ip_hl << 2;
 1551                 struct mbuf *mm = m_copy(m, 0, hlen);
 1552 
 1553                 if (mm && (M_HASCL(mm) || mm->m_len < hlen))
 1554                     mm = m_pullup(mm, hlen);
 1555                 if (mm == NULL)
 1556                     return ENOBUFS;
 1557 
 1558                 im = mtod(mm, struct igmpmsg *);
 1559                 im->im_msgtype  = IGMPMSG_WRONGVIF;
 1560                 im->im_mbz              = 0;
 1561                 im->im_vif              = vifi;
 1562 
 1563                 MRTSTAT_INC(mrts_upcalls);
 1564 
 1565                 k_igmpsrc.sin_addr = im->im_src;
 1566                 if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
 1567                     CTR1(KTR_IPMF, "%s: socket queue full", __func__);
 1568                     MRTSTAT_INC(mrts_upq_sockfull);
 1569                     return ENOBUFS;
 1570                 }
 1571             }
 1572         }
 1573         return 0;
 1574     }
 1575 
 1576 
 1577     /* If I sourced this packet, it counts as output, else it was input. */
 1578     if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) {
 1579         V_viftable[vifi].v_pkt_out++;
 1580         V_viftable[vifi].v_bytes_out += plen;
 1581     } else {
 1582         V_viftable[vifi].v_pkt_in++;
 1583         V_viftable[vifi].v_bytes_in += plen;
 1584     }
 1585     rt->mfc_pkt_cnt++;
 1586     rt->mfc_byte_cnt += plen;
 1587 
 1588     /*
 1589      * For each vif, decide if a copy of the packet should be forwarded.
 1590      * Forward if:
 1591      *          - the ttl exceeds the vif's threshold
 1592      *          - there are group members downstream on the interface
 1593      */
 1594     for (vifi = 0; vifi < V_numvifs; vifi++)
 1595         if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
 1596             V_viftable[vifi].v_pkt_out++;
 1597             V_viftable[vifi].v_bytes_out += plen;
 1598             if (V_viftable[vifi].v_flags & VIFF_REGISTER)
 1599                 pim_register_send(ip, V_viftable + vifi, m, rt);
 1600             else
 1601                 phyint_send(ip, V_viftable + vifi, m);
 1602         }
 1603 
 1604     /*
 1605      * Perform upcall-related bw measuring.
 1606      */
 1607     if (rt->mfc_bw_meter != NULL) {
 1608         struct bw_meter *x;
 1609         struct timeval now;
 1610 
 1611         microtime(&now);
 1612         MFC_LOCK_ASSERT();
 1613         for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
 1614             bw_meter_receive_packet(x, plen, &now);
 1615     }
 1616 
 1617     return 0;
 1618 }
 1619 
 1620 /*
 1621  * Check if a vif number is legal/ok. This is used by in_mcast.c.
 1622  */
 1623 static int
 1624 X_legal_vif_num(int vif)
 1625 {
 1626         int ret;
 1627 
 1628         ret = 0;
 1629         if (vif < 0)
 1630                 return (ret);
 1631 
 1632         VIF_LOCK();
 1633         if (vif < V_numvifs)
 1634                 ret = 1;
 1635         VIF_UNLOCK();
 1636 
 1637         return (ret);
 1638 }
 1639 
 1640 /*
 1641  * Return the local address used by this vif
 1642  */
 1643 static u_long
 1644 X_ip_mcast_src(int vifi)
 1645 {
 1646         in_addr_t addr;
 1647 
 1648         addr = INADDR_ANY;
 1649         if (vifi < 0)
 1650                 return (addr);
 1651 
 1652         VIF_LOCK();
 1653         if (vifi < V_numvifs)
 1654                 addr = V_viftable[vifi].v_lcl_addr.s_addr;
 1655         VIF_UNLOCK();
 1656 
 1657         return (addr);
 1658 }
 1659 
 1660 static void
 1661 phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
 1662 {
 1663     struct mbuf *mb_copy;
 1664     int hlen = ip->ip_hl << 2;
 1665 
 1666     VIF_LOCK_ASSERT();
 1667 
 1668     /*
 1669      * Make a new reference to the packet; make sure that
 1670      * the IP header is actually copied, not just referenced,
 1671      * so that ip_output() only scribbles on the copy.
 1672      */
 1673     mb_copy = m_copypacket(m, M_DONTWAIT);
 1674     if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
 1675         mb_copy = m_pullup(mb_copy, hlen);
 1676     if (mb_copy == NULL)
 1677         return;
 1678 
 1679     send_packet(vifp, mb_copy);
 1680 }
 1681 
 1682 static void
 1683 send_packet(struct vif *vifp, struct mbuf *m)
 1684 {
 1685         struct ip_moptions imo;
 1686         struct in_multi *imm[2];
 1687         int error;
 1688 
 1689         VIF_LOCK_ASSERT();
 1690 
 1691         imo.imo_multicast_ifp  = vifp->v_ifp;
 1692         imo.imo_multicast_ttl  = mtod(m, struct ip *)->ip_ttl - 1;
 1693         imo.imo_multicast_loop = 1;
 1694         imo.imo_multicast_vif  = -1;
 1695         imo.imo_num_memberships = 0;
 1696         imo.imo_max_memberships = 2;
 1697         imo.imo_membership  = &imm[0];
 1698 
 1699         /*
 1700          * Re-entrancy should not be a problem here: the packets that
 1701          * we send out and that are looped back at us are rejected
 1702          * because they appear to come from the loopback interface,
 1703          * which prevents looping.
 1704          */
 1705         error = ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, &imo, NULL);
 1706         CTR3(KTR_IPMF, "%s: vif %td err %d", __func__,
 1707             (ptrdiff_t)(vifp - V_viftable), error);
 1708 }
 1709 
 1710 /*
 1711  * Stubs for old RSVP socket shim implementation.
 1712  */
 1713 
 1714 static int
 1715 X_ip_rsvp_vif(struct socket *so __unused, struct sockopt *sopt __unused)
 1716 {
 1717 
 1718         return (EOPNOTSUPP);
 1719 }
 1720 
 1721 static void
 1722 X_ip_rsvp_force_done(struct socket *so __unused)
 1723 {
 1724 
 1725 }
 1726 
 1727 static void
 1728 X_rsvp_input(struct mbuf *m, int off __unused)
 1729 {
 1730 
 1731         if (!V_rsvp_on)
 1732                 m_freem(m);
 1733 }
 1734 
 1735 /*
 1736  * Code for bandwidth monitors
 1737  */
 1738 
 1739 /*
 1740  * Define common interface for timeval-related methods
 1741  */
 1742 #define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
 1743 #define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
 1744 #define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))
 1745 
 1746 static uint32_t
 1747 compute_bw_meter_flags(struct bw_upcall *req)
 1748 {
 1749     uint32_t flags = 0;
 1750 
 1751     if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
 1752         flags |= BW_METER_UNIT_PACKETS;
 1753     if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
 1754         flags |= BW_METER_UNIT_BYTES;
 1755     if (req->bu_flags & BW_UPCALL_GEQ)
 1756         flags |= BW_METER_GEQ;
 1757     if (req->bu_flags & BW_UPCALL_LEQ)
 1758         flags |= BW_METER_LEQ;
 1759 
 1760     return flags;
 1761 }
 1762 
 1763 /*
 1764  * Add a bw_meter entry
 1765  */
 1766 static int
 1767 add_bw_upcall(struct bw_upcall *req)
 1768 {
 1769     struct mfc *mfc;
 1770     struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
 1771                 BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
 1772     struct timeval now;
 1773     struct bw_meter *x;
 1774     uint32_t flags;
 1775 
 1776     if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
 1777         return EOPNOTSUPP;
 1778 
 1779     /* Test if the flags are valid */
 1780     if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
 1781         return EINVAL;
 1782     if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
 1783         return EINVAL;
 1784     if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
 1785             == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
 1786         return EINVAL;
 1787 
 1788     /* Test if the threshold time interval is valid */
 1789     if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
 1790         return EINVAL;
 1791 
 1792     flags = compute_bw_meter_flags(req);
 1793 
 1794     /*
 1795      * Check whether we already have the same bw_meter entry
 1796      */
 1797     MFC_LOCK();
 1798     mfc = mfc_find(&req->bu_src, &req->bu_dst);
 1799     if (mfc == NULL) {
 1800         MFC_UNLOCK();
 1801         return EADDRNOTAVAIL;
 1802     }
 1803     for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
 1804         if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
 1805                            &req->bu_threshold.b_time, ==)) &&
 1806             (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
 1807             (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
 1808             (x->bm_flags & BW_METER_USER_FLAGS) == flags)  {
 1809             MFC_UNLOCK();
 1810             return 0;           /* XXX Already installed */
 1811         }
 1812     }
 1813 
 1814     /* Allocate the new bw_meter entry */
 1815     x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
 1816     if (x == NULL) {
 1817         MFC_UNLOCK();
 1818         return ENOBUFS;
 1819     }
 1820 
 1821     /* Set the new bw_meter entry */
 1822     x->bm_threshold.b_time = req->bu_threshold.b_time;
 1823     microtime(&now);
 1824     x->bm_start_time = now;
 1825     x->bm_threshold.b_packets = req->bu_threshold.b_packets;
 1826     x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
 1827     x->bm_measured.b_packets = 0;
 1828     x->bm_measured.b_bytes = 0;
 1829     x->bm_flags = flags;
 1830     x->bm_time_next = NULL;
 1831     x->bm_time_hash = BW_METER_BUCKETS;
 1832 
 1833     /* Add the new bw_meter entry to the front of entries for this MFC */
 1834     x->bm_mfc = mfc;
 1835     x->bm_mfc_next = mfc->mfc_bw_meter;
 1836     mfc->mfc_bw_meter = x;
 1837     schedule_bw_meter(x, &now);
 1838     MFC_UNLOCK();
 1839 
 1840     return 0;
 1841 }
 1842 
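For context, requests like the one handled above normally come from the user-level multicast routing daemon over its raw IGMP socket. A minimal userland sketch, assuming the struct bw_upcall layout and the MRT_ADD_BW_UPCALL socket option from <netinet/ip_mroute.h>, with illustrative threshold values only:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <netinet/ip_mroute.h>
#include <string.h>

/*
 * Install a ">= 1000 packets per 3 seconds" meter for (origin, group).
 * mrouter_sock is the raw IGMP socket on which MRT_INIT was already done.
 */
static int
install_bw_meter(int mrouter_sock, struct in_addr origin, struct in_addr group)
{
    struct bw_upcall req;

    memset(&req, 0, sizeof(req));
    req.bu_src = origin;                    /* mfc_origin of the flow */
    req.bu_dst = group;                     /* mfc_mcastgrp of the flow */
    req.bu_flags = BW_UPCALL_UNIT_PACKETS | BW_UPCALL_GEQ;
    req.bu_threshold.b_time.tv_sec = 3;     /* measuring interval */
    req.bu_threshold.b_time.tv_usec = 0;
    req.bu_threshold.b_packets = 1000;      /* upcall once >= 1000 pkts seen */

    return (setsockopt(mrouter_sock, IPPROTO_IP, MRT_ADD_BW_UPCALL,
        &req, sizeof(req)));
}

As the checks in add_bw_upcall() show, the call fails with EOPNOTSUPP unless the daemon has enabled MRT_MFC_BW_UPCALL through MRT_API_CONFIG, and with EADDRNOTAVAIL if no MFC entry exists yet for the (origin, group) pair.
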
 1843 static void
 1844 free_bw_list(struct bw_meter *list)
 1845 {
 1846     while (list != NULL) {
 1847         struct bw_meter *x = list;
 1848 
 1849         list = list->bm_mfc_next;
 1850         unschedule_bw_meter(x);
 1851         free(x, M_BWMETER);
 1852     }
 1853 }
 1854 
 1855 /*
 1856  * Delete one or multiple bw_meter entries
 1857  */
 1858 static int
 1859 del_bw_upcall(struct bw_upcall *req)
 1860 {
 1861     struct mfc *mfc;
 1862     struct bw_meter *x;
 1863 
 1864     if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
 1865         return EOPNOTSUPP;
 1866 
 1867     MFC_LOCK();
 1868 
 1869     /* Find the corresponding MFC entry */
 1870     mfc = mfc_find(&req->bu_src, &req->bu_dst);
 1871     if (mfc == NULL) {
 1872         MFC_UNLOCK();
 1873         return EADDRNOTAVAIL;
 1874     } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
 1875         /*
 1876          * Delete all bw_meter entries for this mfc
 1877          */
 1878         struct bw_meter *list;
 1879 
 1880         list = mfc->mfc_bw_meter;
 1881         mfc->mfc_bw_meter = NULL;
 1882         free_bw_list(list);
 1883         MFC_UNLOCK();
 1884         return 0;
 1885     } else {                    /* Delete a single bw_meter entry */
 1886         struct bw_meter *prev;
 1887         uint32_t flags = 0;
 1888 
 1889         flags = compute_bw_meter_flags(req);
 1890 
 1891         /* Find the bw_meter entry to delete */
 1892         for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
 1893              prev = x, x = x->bm_mfc_next) {
 1894             if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
 1895                                &req->bu_threshold.b_time, ==)) &&
 1896                 (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
 1897                 (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
 1898                 (x->bm_flags & BW_METER_USER_FLAGS) == flags)
 1899                 break;
 1900         }
 1901         if (x != NULL) { /* Delete entry from the list for this MFC */
 1902             if (prev != NULL)
 1903                 prev->bm_mfc_next = x->bm_mfc_next;     /* remove from middle*/
 1904             else
 1905                 x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
 1906 
 1907             unschedule_bw_meter(x);
 1908             MFC_UNLOCK();
 1909             /* Free the bw_meter entry */
 1910             free(x, M_BWMETER);
 1911             return 0;
 1912         } else {
 1913             MFC_UNLOCK();
 1914             return EINVAL;
 1915         }
 1916     }
 1917     /* NOTREACHED */
 1918 }
 1919 
 1920 /*
 1921  * Perform bandwidth measurement processing that may result in an upcall
 1922  */
 1923 static void
 1924 bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
 1925 {
 1926     struct timeval delta;
 1927 
 1928     MFC_LOCK_ASSERT();
 1929 
 1930     delta = *nowp;
 1931     BW_TIMEVALDECR(&delta, &x->bm_start_time);
 1932 
 1933     if (x->bm_flags & BW_METER_GEQ) {
 1934         /*
 1935          * Processing for ">=" type of bw_meter entry
 1936          */
 1937         if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
 1938             /* Reset the bw_meter entry */
 1939             x->bm_start_time = *nowp;
 1940             x->bm_measured.b_packets = 0;
 1941             x->bm_measured.b_bytes = 0;
 1942             x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 1943         }
 1944 
 1945         /* Record that a packet is received */
 1946         x->bm_measured.b_packets++;
 1947         x->bm_measured.b_bytes += plen;
 1948 
 1949         /*
 1950          * Test if we should deliver an upcall
 1951          */
 1952         if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
 1953             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 1954                  (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
 1955                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 1956                  (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
 1957                 /* Prepare an upcall for delivery */
 1958                 bw_meter_prepare_upcall(x, nowp);
 1959                 x->bm_flags |= BW_METER_UPCALL_DELIVERED;
 1960             }
 1961         }
 1962     } else if (x->bm_flags & BW_METER_LEQ) {
 1963         /*
 1964          * Processing for "<=" type of bw_meter entry
 1965          */
 1966         if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
 1967             /*
 1968              * We are behind time with the multicast forwarding table
 1969              * scanning for "<=" type of bw_meter entries, so test now
 1970              * if we should deliver an upcall.
 1971              */
 1972             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 1973                  (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
 1974                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 1975                  (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
 1976                 /* Prepare an upcall for delivery */
 1977                 bw_meter_prepare_upcall(x, nowp);
 1978             }
 1979             /* Reschedule the bw_meter entry */
 1980             unschedule_bw_meter(x);
 1981             schedule_bw_meter(x, nowp);
 1982         }
 1983 
 1984         /* Record that a packet is received */
 1985         x->bm_measured.b_packets++;
 1986         x->bm_measured.b_bytes += plen;
 1987 
 1988         /*
 1989          * Test if we should restart the measuring interval
 1990          */
 1991         if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
 1992              x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
 1993             (x->bm_flags & BW_METER_UNIT_BYTES &&
 1994              x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
 1995             /* Don't restart the measuring interval */
 1996         } else {
 1997             /* Do restart the measuring interval */
 1998             /*
 1999              * XXX: note that we don't unschedule and schedule, because this
 2000              * might be too much overhead per packet. Instead, when we process
 2001              * all entries for a given timer hash bin, we check whether it is
 2002              * really a timeout. If not, we reschedule at that time.
 2003              */
 2004             x->bm_start_time = *nowp;
 2005             x->bm_measured.b_packets = 0;
 2006             x->bm_measured.b_bytes = 0;
 2007             x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 2008         }
 2009     }
 2010 }
 2011 
 2012 /*
 2013  * Prepare a bandwidth-related upcall
 2014  */
 2015 static void
 2016 bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
 2017 {
 2018     struct timeval delta;
 2019     struct bw_upcall *u;
 2020 
 2021     MFC_LOCK_ASSERT();
 2022 
 2023     /*
 2024      * Compute the measured time interval
 2025      */
 2026     delta = *nowp;
 2027     BW_TIMEVALDECR(&delta, &x->bm_start_time);
 2028 
 2029     /*
 2030      * If there are too many pending upcalls, deliver them now
 2031      */
 2032     if (V_bw_upcalls_n >= BW_UPCALLS_MAX)
 2033         bw_upcalls_send();
 2034 
 2035     /*
 2036      * Set the bw_upcall entry
 2037      */
 2038     u = &V_bw_upcalls[V_bw_upcalls_n++];
 2039     u->bu_src = x->bm_mfc->mfc_origin;
 2040     u->bu_dst = x->bm_mfc->mfc_mcastgrp;
 2041     u->bu_threshold.b_time = x->bm_threshold.b_time;
 2042     u->bu_threshold.b_packets = x->bm_threshold.b_packets;
 2043     u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
 2044     u->bu_measured.b_time = delta;
 2045     u->bu_measured.b_packets = x->bm_measured.b_packets;
 2046     u->bu_measured.b_bytes = x->bm_measured.b_bytes;
 2047     u->bu_flags = 0;
 2048     if (x->bm_flags & BW_METER_UNIT_PACKETS)
 2049         u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
 2050     if (x->bm_flags & BW_METER_UNIT_BYTES)
 2051         u->bu_flags |= BW_UPCALL_UNIT_BYTES;
 2052     if (x->bm_flags & BW_METER_GEQ)
 2053         u->bu_flags |= BW_UPCALL_GEQ;
 2054     if (x->bm_flags & BW_METER_LEQ)
 2055         u->bu_flags |= BW_UPCALL_LEQ;
 2056 }
 2057 
 2058 /*
 2059  * Send the pending bandwidth-related upcalls
 2060  */
 2061 static void
 2062 bw_upcalls_send(void)
 2063 {
 2064     struct mbuf *m;
 2065     int len = V_bw_upcalls_n * sizeof(V_bw_upcalls[0]);
 2066     struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 2067     static struct igmpmsg igmpmsg = { 0,                /* unused1 */
 2068                                       0,                /* unused2 */
 2069                                       IGMPMSG_BW_UPCALL,/* im_msgtype */
 2070                                       0,                /* im_mbz  */
 2071                                       0,                /* im_vif  */
 2072                                       0,                /* unused3 */
 2073                                       { 0 },            /* im_src  */
 2074                                       { 0 } };          /* im_dst  */
 2075 
 2076     MFC_LOCK_ASSERT();
 2077 
 2078     if (V_bw_upcalls_n == 0)
 2079         return;                 /* No pending upcalls */
 2080 
 2081     V_bw_upcalls_n = 0;
 2082 
 2083     /*
 2084      * Allocate a new mbuf, initialize it with the header and
 2085      * the payload for the pending upcalls.
 2086      */
 2087     MGETHDR(m, M_DONTWAIT, MT_DATA);
 2088     if (m == NULL) {
 2089         log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
 2090         return;
 2091     }
 2092 
 2093     m->m_len = m->m_pkthdr.len = 0;
 2094     m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
 2095     m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&V_bw_upcalls[0]);
 2096 
 2097     /*
 2098      * Send the upcalls
 2099      * XXX do we need to set the address in k_igmpsrc ?
 2100      */
 2101     MRTSTAT_INC(mrts_upcalls);
 2102     if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) {
 2103         log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
 2104         MRTSTAT_INC(mrts_upq_sockfull);
 2105     }
 2106 }
 2107 
 2108 /*
 2109  * Compute the timeout hash value for the bw_meter entries
 2110  */
 2111 #define BW_METER_TIMEHASH(bw_meter, hash)                               \
 2112     do {                                                                \
 2113         struct timeval next_timeval = (bw_meter)->bm_start_time;        \
 2114                                                                         \
 2115         BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
 2116         (hash) = next_timeval.tv_sec;                                   \
 2117         if (next_timeval.tv_usec)                                       \
 2118             (hash)++; /* XXX: make sure we don't timeout early */       \
 2119         (hash) %= BW_METER_BUCKETS;                                     \
 2120     } while (0)
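
As a worked example of this hash: for an entry whose bm_start_time is 100.250000 s and whose threshold interval is 3.500000 s, next_timeval is 103.750000 s; the hash starts at 103, is bumped to 104 because tv_usec is non-zero (so the entry cannot expire early), and is then reduced modulo BW_METER_BUCKETS, giving bucket 104 with, for example, 1024 buckets.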
 2121 
 2122 /*
 2123  * Schedule a timer to periodically process a bw_meter entry of type "<="
 2124  * by linking the entry in the proper hash bucket.
 2125  */
 2126 static void
 2127 schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
 2128 {
 2129     int time_hash;
 2130 
 2131     MFC_LOCK_ASSERT();
 2132 
 2133     if (!(x->bm_flags & BW_METER_LEQ))
 2134         return;         /* XXX: we schedule timers only for "<=" entries */
 2135 
 2136     /*
 2137      * Reset the bw_meter entry
 2138      */
 2139     x->bm_start_time = *nowp;
 2140     x->bm_measured.b_packets = 0;
 2141     x->bm_measured.b_bytes = 0;
 2142     x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
 2143 
 2144     /*
 2145      * Compute the timeout hash value and insert the entry
 2146      */
 2147     BW_METER_TIMEHASH(x, time_hash);
 2148     x->bm_time_next = V_bw_meter_timers[time_hash];
 2149     V_bw_meter_timers[time_hash] = x;
 2150     x->bm_time_hash = time_hash;
 2151 }
 2152 
 2153 /*
 2154  * Unschedule the periodic timer that processes a bw_meter entry of type "<="
 2155  * by removing the entry from the proper hash bucket.
 2156  */
 2157 static void
 2158 unschedule_bw_meter(struct bw_meter *x)
 2159 {
 2160     int time_hash;
 2161     struct bw_meter *prev, *tmp;
 2162 
 2163     MFC_LOCK_ASSERT();
 2164 
 2165     if (!(x->bm_flags & BW_METER_LEQ))
 2166         return;         /* XXX: we schedule timers only for "<=" entries */
 2167 
 2168     /*
 2169      * Compute the timeout hash value and delete the entry
 2170      */
 2171     time_hash = x->bm_time_hash;
 2172     if (time_hash >= BW_METER_BUCKETS)
 2173         return;         /* Entry was not scheduled */
 2174 
 2175     for (prev = NULL, tmp = V_bw_meter_timers[time_hash];
 2176              tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
 2177         if (tmp == x)
 2178             break;
 2179 
 2180     if (tmp == NULL)
 2181         panic("unschedule_bw_meter: bw_meter entry not found");
 2182 
 2183     if (prev != NULL)
 2184         prev->bm_time_next = x->bm_time_next;
 2185     else
 2186         V_bw_meter_timers[time_hash] = x->bm_time_next;
 2187 
 2188     x->bm_time_next = NULL;
 2189     x->bm_time_hash = BW_METER_BUCKETS;
 2190 }
 2191 
 2192 
 2193 /*
 2194  * Process all "<=" type bw_meter entries that are due now, and for
 2195  * each entry prepare an upcall if necessary. Each processed entry is
 2196  * rescheduled for the next (periodic) processing.
 2197  *
 2198  * This is run periodically (once per second normally). On each round,
 2199  * all the potentially matching entries are in the hash slot that we are
 2200  * looking at.
 2201  */
 2202 static void
 2203 bw_meter_process()
 2204 {
 2205     uint32_t loops;
 2206     int i;
 2207     struct timeval now, process_endtime;
 2208 
 2209     microtime(&now);
 2210     if (V_last_tv_sec == now.tv_sec)
 2211         return;         /* nothing to do */
 2212 
 2213     loops = now.tv_sec - V_last_tv_sec;
 2214     V_last_tv_sec = now.tv_sec;
 2215     if (loops > BW_METER_BUCKETS)
 2216         loops = BW_METER_BUCKETS;
 2217 
 2218     MFC_LOCK();
 2219     /*
 2220      * Process all bins of bw_meter entries from the one after the last
 2221      * processed to the current one. On entry, i points to the last bucket
 2222      * visited, so we need to increment i at the beginning of the loop.
 2223      */
 2224     for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
 2225         struct bw_meter *x, *tmp_list;
 2226 
 2227         if (++i >= BW_METER_BUCKETS)
 2228             i = 0;
 2229 
 2230         /* Disconnect the list of bw_meter entries from the bin */
 2231         tmp_list = V_bw_meter_timers[i];
 2232         V_bw_meter_timers[i] = NULL;
 2233 
 2234         /* Process the list of bw_meter entries */
 2235         while (tmp_list != NULL) {
 2236             x = tmp_list;
 2237             tmp_list = tmp_list->bm_time_next;
 2238 
 2239             /* Test if the time interval is over */
 2240             process_endtime = x->bm_start_time;
 2241             BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
 2242             if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
 2243                 /* Not yet: reschedule, but don't reset */
 2244                 int time_hash;
 2245 
 2246                 BW_METER_TIMEHASH(x, time_hash);
 2247                 if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
 2248                     /*
 2249                      * XXX: somehow the bin processing is a bit ahead of time.
 2250                      * Put the entry in the next bin.
 2251                      */
 2252                     if (++time_hash >= BW_METER_BUCKETS)
 2253                         time_hash = 0;
 2254                 }
 2255                 x->bm_time_next = V_bw_meter_timers[time_hash];
 2256                 V_bw_meter_timers[time_hash] = x;
 2257                 x->bm_time_hash = time_hash;
 2258 
 2259                 continue;
 2260             }
 2261 
 2262             /*
 2263              * Test if we should deliver an upcall
 2264              */
 2265             if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
 2266                  (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
 2267                 ((x->bm_flags & BW_METER_UNIT_BYTES) &&
 2268                  (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
 2269                 /* Prepare an upcall for delivery */
 2270                 bw_meter_prepare_upcall(x, &now);
 2271             }
 2272 
 2273             /*
 2274              * Reschedule for next processing
 2275              */
 2276             schedule_bw_meter(x, &now);
 2277         }
 2278     }
 2279 
 2280     /* Send all upcalls that are pending delivery */
 2281     bw_upcalls_send();
 2282 
 2283     MFC_UNLOCK();
 2284 }
 2285 
 2286 /*
 2287  * A periodic function for sending all upcalls that are pending delivery
 2288  */
 2289 static void
 2290 expire_bw_upcalls_send(void *arg)
 2291 {
 2292     CURVNET_SET((struct vnet *) arg);
 2293 
 2294     MFC_LOCK();
 2295     bw_upcalls_send();
 2296     MFC_UNLOCK();
 2297 
 2298     callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
 2299         curvnet);
 2300     CURVNET_RESTORE();
 2301 }
 2302 
 2303 /*
 2304  * A periodic function that scans the multicast forwarding table and
 2305  * processes all "<=" bw_meter entries.
 2306  */
 2307 static void
 2308 expire_bw_meter_process(void *arg)
 2309 {
 2310     CURVNET_SET((struct vnet *) arg);
 2311 
 2312     if (V_mrt_api_config & MRT_MFC_BW_UPCALL)
 2313         bw_meter_process();
 2314 
 2315     callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
 2316         curvnet);
 2317     CURVNET_RESTORE();
 2318 }
 2319 
 2320 /*
 2321  * End of bandwidth monitoring code
 2322  */
 2323 
 2324 /*
 2325  * Send the packet up to the user-level daemon, or do the PIM Register
 2326  * encapsulation in the kernel and send it towards the RP.
 2327  */
 2328 static int
 2329 pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
 2330     struct mfc *rt)
 2331 {
 2332     struct mbuf *mb_copy, *mm;
 2333 
 2334     /*
 2335      * Do not send IGMP_WHOLEPKT notifications to userland if the
 2336      * rendezvous point was unspecified and we were told not to.
 2337      */
 2338     if (pim_squelch_wholepkt != 0 && (V_mrt_api_config & MRT_MFC_RP) &&
 2339         in_nullhost(rt->mfc_rp))
 2340         return 0;
 2341 
 2342     mb_copy = pim_register_prepare(ip, m);
 2343     if (mb_copy == NULL)
 2344         return ENOBUFS;
 2345 
 2346     /*
 2347      * Send all the fragments. Note that the mbuf for each fragment
 2348      * is freed by the sending machinery.
 2349      */
 2350     for (mm = mb_copy; mm; mm = mb_copy) {
 2351         mb_copy = mm->m_nextpkt;
 2352         mm->m_nextpkt = 0;
 2353         mm = m_pullup(mm, sizeof(struct ip));
 2354         if (mm != NULL) {
 2355             ip = mtod(mm, struct ip *);
 2356             if ((V_mrt_api_config & MRT_MFC_RP) && !in_nullhost(rt->mfc_rp)) {
 2357                 pim_register_send_rp(ip, vifp, mm, rt);
 2358             } else {
 2359                 pim_register_send_upcall(ip, vifp, mm, rt);
 2360             }
 2361         }
 2362     }
 2363 
 2364     return 0;
 2365 }
 2366 
 2367 /*
 2368  * Return a copy of the data packet that is ready for PIM Register
 2369  * encapsulation.
 2370  * XXX: Note that in the returned copy the IP header is a valid one.
 2371  */
 2372 static struct mbuf *
 2373 pim_register_prepare(struct ip *ip, struct mbuf *m)
 2374 {
 2375     struct mbuf *mb_copy = NULL;
 2376     int mtu;
 2377 
 2378     /* Take care of delayed checksums */
 2379     if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
 2380         in_delayed_cksum(m);
 2381         m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
 2382     }
 2383 
 2384     /*
 2385      * Copy the old packet & pullup its IP header into the
 2386      * new mbuf so we can modify it.
 2387      */
 2388     mb_copy = m_copypacket(m, M_DONTWAIT);
 2389     if (mb_copy == NULL)
 2390         return NULL;
 2391     mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
 2392     if (mb_copy == NULL)
 2393         return NULL;
 2394 
 2395     /* take care of the TTL */
 2396     ip = mtod(mb_copy, struct ip *);
 2397     --ip->ip_ttl;
 2398 
 2399     /* Compute the MTU after the PIM Register encapsulation */
 2400     mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
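    /*
     * For example, assuming a 20-byte encapsulating IP header and an 8-byte
     * PIM Register header, this leaves 0xffff - 20 - 8 = 65507 bytes for the
     * inner packet; anything larger is fragmented below.
     */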
 2401 
 2402     if (ip->ip_len <= mtu) {
 2403         /* Turn the IP header into a valid one */
 2404         ip->ip_len = htons(ip->ip_len);
 2405         ip->ip_off = htons(ip->ip_off);
 2406         ip->ip_sum = 0;
 2407         ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
 2408     } else {
 2409         /* Fragment the packet */
 2410         if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) {
 2411             m_freem(mb_copy);
 2412             return NULL;
 2413         }
 2414     }
 2415     return mb_copy;
 2416 }
 2417 
 2418 /*
 2419  * Send an upcall with the data packet to the user-level process.
 2420  */
 2421 static int
 2422 pim_register_send_upcall(struct ip *ip, struct vif *vifp,
 2423     struct mbuf *mb_copy, struct mfc *rt)
 2424 {
 2425     struct mbuf *mb_first;
 2426     int len = ntohs(ip->ip_len);
 2427     struct igmpmsg *im;
 2428     struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
 2429 
 2430     VIF_LOCK_ASSERT();
 2431 
 2432     /*
 2433      * Add a new mbuf with an upcall header
 2434      */
 2435     MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
 2436     if (mb_first == NULL) {
 2437         m_freem(mb_copy);
 2438         return ENOBUFS;
 2439     }
 2440     mb_first->m_data += max_linkhdr;
 2441     mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
 2442     mb_first->m_len = sizeof(struct igmpmsg);
 2443     mb_first->m_next = mb_copy;
 2444 
 2445     /* Send message to routing daemon */
 2446     im = mtod(mb_first, struct igmpmsg *);
 2447     im->im_msgtype      = IGMPMSG_WHOLEPKT;
 2448     im->im_mbz          = 0;
 2449     im->im_vif          = vifp - V_viftable;
 2450     im->im_src          = ip->ip_src;
 2451     im->im_dst          = ip->ip_dst;
 2452 
 2453     k_igmpsrc.sin_addr  = ip->ip_src;
 2454 
 2455     MRTSTAT_INC(mrts_upcalls);
 2456 
 2457     if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) {
 2458         CTR1(KTR_IPMF, "%s: socket queue full", __func__);
 2459         MRTSTAT_INC(mrts_upq_sockfull);
 2460         return ENOBUFS;
 2461     }
 2462 
 2463     /* Keep statistics */
 2464     PIMSTAT_INC(pims_snd_registers_msgs);
 2465     PIMSTAT_ADD(pims_snd_registers_bytes, len);
 2466 
 2467     return 0;
 2468 }
 2469 
 2470 /*
 2471  * Encapsulate the data packet in PIM Register message and send it to the RP.
 2472  */
 2473 static int
 2474 pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy,
 2475     struct mfc *rt)
 2476 {
 2477     struct mbuf *mb_first;
 2478     struct ip *ip_outer;
 2479     struct pim_encap_pimhdr *pimhdr;
 2480     int len = ntohs(ip->ip_len);
 2481     vifi_t vifi = rt->mfc_parent;
 2482 
 2483     VIF_LOCK_ASSERT();
 2484 
 2485     if ((vifi >= V_numvifs) || in_nullhost(V_viftable[vifi].v_lcl_addr)) {
 2486         m_freem(mb_copy);
 2487         return EADDRNOTAVAIL;           /* The iif vif is invalid */
 2488     }
 2489 
 2490     /*
 2491      * Add a new mbuf with the encapsulating header
 2492      */
 2493     MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
 2494     if (mb_first == NULL) {
 2495         m_freem(mb_copy);
 2496         return ENOBUFS;
 2497     }
 2498     mb_first->m_data += max_linkhdr;
 2499     mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
 2500     mb_first->m_next = mb_copy;
 2501 
 2502     mb_first->m_pkthdr.len = len + mb_first->m_len;
 2503 
 2504     /*
 2505      * Fill in the encapsulating IP and PIM header
 2506      */
 2507     ip_outer = mtod(mb_first, struct ip *);
 2508     *ip_outer = pim_encap_iphdr;
 2509     ip_outer->ip_id = ip_newid();
 2510     ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
 2511     ip_outer->ip_src = V_viftable[vifi].v_lcl_addr;
 2512     ip_outer->ip_dst = rt->mfc_rp;
 2513     /*
 2514      * Copy the inner header TOS to the outer header, and take care of the
 2515      * IP_DF bit.
 2516      */
 2517     ip_outer->ip_tos = ip->ip_tos;
 2518     if (ntohs(ip->ip_off) & IP_DF)
 2519         ip_outer->ip_off |= IP_DF;
 2520     pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
 2521                                          + sizeof(pim_encap_iphdr));
 2522     *pimhdr = pim_encap_pimhdr;
 2523     /* If the iif crosses a border, set the Border-bit */
 2524     if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & V_mrt_api_config)
 2525         pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
 2526 
 2527     mb_first->m_data += sizeof(pim_encap_iphdr);
 2528     pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
 2529     mb_first->m_data -= sizeof(pim_encap_iphdr);
 2530 
 2531     send_packet(vifp, mb_first);
 2532 
 2533     /* Keep statistics */
 2534     PIMSTAT_INC(pims_snd_registers_msgs);
 2535     PIMSTAT_ADD(pims_snd_registers_bytes, len);
 2536 
 2537     return 0;
 2538 }
 2539 
 2540 /*
 2541  * pim_encapcheck() is called by the encap4_input() path at runtime to
 2542  * determine if a packet is for PIM, allowing PIM to be dynamically loaded
 2543  * into the kernel.
 2544  */
 2545 static int
 2546 pim_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
 2547 {
 2548 
 2549 #ifdef DIAGNOSTIC
 2550     KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
 2551 #endif
 2552     if (proto != IPPROTO_PIM)
 2553         return 0;       /* not for us; reject the datagram. */
 2554 
 2555     return 64;          /* claim the datagram. */
 2556 }
 2557 
 2558 /*
 2559  * PIM-SMv2 and PIM-DM messages processing.
 2560  * Receives and verifies the PIM control messages, and passes them
 2561  * up to the listening socket, using rip_input().
 2562  * The only message with special processing is the PIM_REGISTER message
 2563  * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 2564  * is passed to if_simloop().
 2565  */
 2566 void
 2567 pim_input(struct mbuf *m, int off)
 2568 {
 2569     struct ip *ip = mtod(m, struct ip *);
 2570     struct pim *pim;
 2571     int minlen;
 2572     int datalen = ip->ip_len;
 2573     int ip_tos;
 2574     int iphlen = off;
 2575 
 2576     /* Keep statistics */
 2577     PIMSTAT_INC(pims_rcv_total_msgs);
 2578     PIMSTAT_ADD(pims_rcv_total_bytes, datalen);
 2579 
 2580     /*
 2581      * Validate lengths
 2582      */
 2583     if (datalen < PIM_MINLEN) {
 2584         PIMSTAT_INC(pims_rcv_tooshort);
 2585         CTR3(KTR_IPMF, "%s: short packet (%d) from %s",
 2586             __func__, datalen, inet_ntoa(ip->ip_src));
 2587         m_freem(m);
 2588         return;
 2589     }
 2590 
 2591     /*
 2592      * If the packet is at least as big as a REGISTER, go ahead
 2593      * and grab the PIM REGISTER header size, to avoid another
 2594      * possible m_pullup() later.
 2595      *
 2596      * PIM_MINLEN       == pimhdr + u_int32_t == 4 + 4 = 8
 2597      * PIM_REG_MINLEN   == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
 2598      */
 2599     minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
 2600     /*
 2601      * Get the IP and PIM headers in contiguous memory, and
 2602      * possibly the PIM REGISTER header.
 2603      */
 2604     if ((m->m_flags & M_EXT || m->m_len < minlen) &&
 2605         (m = m_pullup(m, minlen)) == 0) {
 2606         CTR1(KTR_IPMF, "%s: m_pullup() failed", __func__);
 2607         return;
 2608     }
 2609 
 2610     /* m_pullup() may have given us a new mbuf so reset ip. */
 2611     ip = mtod(m, struct ip *);
 2612     ip_tos = ip->ip_tos;
 2613 
 2614     /* adjust mbuf to point to the PIM header */
 2615     m->m_data += iphlen;
 2616     m->m_len  -= iphlen;
 2617     pim = mtod(m, struct pim *);
 2618 
 2619     /*
 2620      * Validate checksum. If PIM REGISTER, exclude the data packet.
 2621      *
 2622      * XXX: some older PIMv2 implementations don't make this distinction,
 2623      * so for compatibility reasons perform the checksum over part of the
 2624      * message first and, if that fails, over the whole message.
 2625      */
 2626     if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
 2627         /* do nothing, checksum okay */
 2628     } else if (in_cksum(m, datalen)) {
 2629         PIMSTAT_INC(pims_rcv_badsum);
 2630         CTR1(KTR_IPMF, "%s: invalid checksum", __func__);
 2631         m_freem(m);
 2632         return;
 2633     }
 2634 
 2635     /* PIM version check */
 2636     if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
 2637         PIMSTAT_INC(pims_rcv_badversion);
 2638         CTR3(KTR_IPMF, "%s: bad version %d expect %d", __func__,
 2639             (int)PIM_VT_V(pim->pim_vt), PIM_VERSION);
 2640         m_freem(m);
 2641         return;
 2642     }
 2643 
 2644     /* restore mbuf back to the outer IP */
 2645     m->m_data -= iphlen;
 2646     m->m_len  += iphlen;
 2647 
 2648     if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
 2649         /*
 2650          * Since this is a REGISTER, we'll make a copy of the register
 2651          * headers ip + pim + u_int32 + encap_ip, to be passed up to the
 2652          * routing daemon.
 2653          */
 2654         struct sockaddr_in dst = { sizeof(dst), AF_INET };
 2655         struct mbuf *mcp;
 2656         struct ip *encap_ip;
 2657         u_int32_t *reghdr;
 2658         struct ifnet *vifp;
 2659 
 2660         VIF_LOCK();
 2661         if ((V_reg_vif_num >= V_numvifs) || (V_reg_vif_num == VIFI_INVALID)) {
 2662             VIF_UNLOCK();
 2663             CTR2(KTR_IPMF, "%s: register vif not set: %d", __func__,
 2664                 (int)V_reg_vif_num);
 2665             m_freem(m);
 2666             return;
 2667         }
 2668         /* XXX need refcnt? */
 2669         vifp = V_viftable[V_reg_vif_num].v_ifp;
 2670         VIF_UNLOCK();
 2671 
 2672         /*
 2673          * Validate length
 2674          */
 2675         if (datalen < PIM_REG_MINLEN) {
 2676             PIMSTAT_INC(pims_rcv_tooshort);
 2677             PIMSTAT_INC(pims_rcv_badregisters);
 2678             CTR1(KTR_IPMF, "%s: register packet size too small", __func__);
 2679             m_freem(m);
 2680             return;
 2681         }
 2682 
 2683         reghdr = (u_int32_t *)(pim + 1);
 2684         encap_ip = (struct ip *)(reghdr + 1);
 2685 
 2686         CTR3(KTR_IPMF, "%s: register: encap ip src %s len %d",
 2687             __func__, inet_ntoa(encap_ip->ip_src), ntohs(encap_ip->ip_len));
 2688 
 2689         /* verify the version number of the inner packet */
 2690         if (encap_ip->ip_v != IPVERSION) {
 2691             PIMSTAT_INC(pims_rcv_badregisters);
 2692             CTR1(KTR_IPMF, "%s: bad encap ip version", __func__);
 2693             m_freem(m);
 2694             return;
 2695         }
 2696 
 2697         /* verify the inner packet is destined to a mcast group */
 2698         if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
 2699             PIMSTAT_INC(pims_rcv_badregisters);
 2700             CTR2(KTR_IPMF, "%s: bad encap ip dest %s", __func__,
 2701                 inet_ntoa(encap_ip->ip_dst));
 2702             m_freem(m);
 2703             return;
 2704         }
 2705 
 2706         /* If a NULL_REGISTER, pass it to the daemon */
 2707         if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
 2708             goto pim_input_to_daemon;
 2709 
 2710         /*
 2711          * Copy the TOS from the outer IP header to the inner IP header.
 2712          */
 2713         if (encap_ip->ip_tos != ip_tos) {
 2714             /* Outer TOS -> inner TOS */
 2715             encap_ip->ip_tos = ip_tos;
 2716             /* Recompute the inner header checksum. Sigh... */
 2717 
 2718             /* adjust mbuf to point to the inner IP header */
 2719             m->m_data += (iphlen + PIM_MINLEN);
 2720             m->m_len  -= (iphlen + PIM_MINLEN);
 2721 
 2722             encap_ip->ip_sum = 0;
 2723             encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
 2724 
 2725             /* restore mbuf to point back to the outer IP header */
 2726             m->m_data -= (iphlen + PIM_MINLEN);
 2727             m->m_len  += (iphlen + PIM_MINLEN);
 2728         }
 2729 
 2730         /*
 2731          * Decapsulate the inner IP packet and loopback to forward it
 2732          * as a normal multicast packet. Also, make a copy of the
 2733          *     outer_iphdr + pimhdr + reghdr + encap_iphdr
 2734          * to pass to the daemon later, so it can take the appropriate
 2735          * actions (e.g., send back PIM_REGISTER_STOP).
 2736          * XXX: here m->m_data points to the outer IP header.
 2737          */
 2738         mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
 2739         if (mcp == NULL) {
 2740             CTR1(KTR_IPMF, "%s: m_copy() failed", __func__);
 2741             m_freem(m);
 2742             return;
 2743         }
 2744 
 2745         /* Keep statistics */
 2746         /* XXX: registers_bytes include only the encap. mcast pkt */
 2747         PIMSTAT_INC(pims_rcv_registers_msgs);
 2748         PIMSTAT_ADD(pims_rcv_registers_bytes, ntohs(encap_ip->ip_len));
 2749 
 2750         /*
 2751          * forward the inner ip packet; point m_data at the inner ip.
 2752          */
 2753         m_adj(m, iphlen + PIM_MINLEN);
 2754 
 2755         CTR4(KTR_IPMF,
 2756             "%s: forward decap'd REGISTER: src %lx dst %lx vif %d",
 2757             __func__,
 2758             (u_long)ntohl(encap_ip->ip_src.s_addr),
 2759             (u_long)ntohl(encap_ip->ip_dst.s_addr),
 2760             (int)V_reg_vif_num);
 2761 
 2762         /* NB: vifp was collected above; can it change on us? */
 2763         if_simloop(vifp, m, dst.sin_family, 0);
 2764 
 2765         /* prepare the register head to send to the mrouting daemon */
 2766         m = mcp;
 2767     }
 2768 
 2769 pim_input_to_daemon:
 2770     /*
 2771      * Pass the PIM message up to the daemon; if it is a Register message,
 2772      * pass the 'head' only up to the daemon. This includes the
 2773      * outer IP header, PIM header, PIM-Register header and the
 2774      * inner IP header.
 2775      * XXX: the outer IP header pkt size of a Register is not adjusted to
 2776      * reflect the fact that the inner multicast data is truncated.
 2777      */
 2778     rip_input(m, iphlen);
 2779 
 2780     return;
 2781 }
 2782 
 2783 static int
 2784 sysctl_mfctable(SYSCTL_HANDLER_ARGS)
 2785 {
 2786         struct mfc      *rt;
 2787         int              error, i;
 2788 
 2789         if (req->newptr)
 2790                 return (EPERM);
 2791         if (V_mfchashtbl == NULL)       /* XXX unlocked */
 2792                 return (0);
 2793         error = sysctl_wire_old_buffer(req, 0);
 2794         if (error)
 2795                 return (error);
 2796 
 2797         MFC_LOCK();
 2798         for (i = 0; i < mfchashsize; i++) {
 2799                 LIST_FOREACH(rt, &V_mfchashtbl[i], mfc_hash) {
 2800                         error = SYSCTL_OUT(req, rt, sizeof(struct mfc));
 2801                         if (error)
 2802                                 goto out_locked;
 2803                 }
 2804         }
 2805 out_locked:
 2806         MFC_UNLOCK();
 2807         return (error);
 2808 }
 2809 
 2810 SYSCTL_NODE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD, sysctl_mfctable,
 2811     "IPv4 Multicast Forwarding Table (struct *mfc[mfchashsize], "
 2812     "netinet/ip_mroute.h)");
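
The node above is what netstat-style tools read to display the multicast forwarding cache. A rough userland sketch, assuming struct mfc from <netinet/ip_mroute.h> is usable from user code and that the handler exports one struct mfc per cache entry (error handling and buffer-growth retries are omitted):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/ip_mroute.h>
#include <stdio.h>
#include <stdlib.h>

static void
dump_mfctable(void)
{
    struct mfc *table;
    size_t len, i;

    /* First call sizes the buffer, second call fetches the table. */
    if (sysctlbyname("net.inet.ip.mfctable", NULL, &len, NULL, 0) == -1)
        return;
    if ((table = malloc(len)) == NULL)
        return;
    if (sysctlbyname("net.inet.ip.mfctable", table, &len, NULL, 0) == 0) {
        for (i = 0; i < len / sizeof(struct mfc); i++)
            printf("origin %#x group %#x pkts %lu\n",
                (unsigned)ntohl(table[i].mfc_origin.s_addr),
                (unsigned)ntohl(table[i].mfc_mcastgrp.s_addr),
                (u_long)table[i].mfc_pkt_cnt);
    }
    free(table);
}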
 2813 
 2814 static void
 2815 vnet_mroute_init(const void *unused __unused)
 2816 {
 2817 
 2818         MALLOC(V_nexpire, u_char *, mfchashsize, M_MRTABLE, M_WAITOK|M_ZERO);
 2819         bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
 2820         callout_init(&V_expire_upcalls_ch, CALLOUT_MPSAFE);
 2821         callout_init(&V_bw_upcalls_ch, CALLOUT_MPSAFE);
 2822         callout_init(&V_bw_meter_ch, CALLOUT_MPSAFE);
 2823 }
 2824 
 2825 VNET_SYSINIT(vnet_mroute_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, vnet_mroute_init,
 2826         NULL);
 2827 
 2828 static void
 2829 vnet_mroute_uninit(const void *unused __unused)
 2830 {
 2831 
 2832         FREE(V_nexpire, M_MRTABLE);
 2833         V_nexpire = NULL;
 2834 }
 2835 
 2836 VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, 
 2837         vnet_mroute_uninit, NULL);
 2838 
 2839 static int
 2840 ip_mroute_modevent(module_t mod, int type, void *unused)
 2841 {
 2842 
 2843     switch (type) {
 2844     case MOD_LOAD:
 2845         MROUTER_LOCK_INIT();
 2846 
 2847         if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 
 2848             if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
 2849         if (if_detach_event_tag == NULL) {
 2850                 printf("ip_mroute: unable to register ifnet_departure_event handler\n");
 2851                 MROUTER_LOCK_DESTROY();
 2852                 return (EINVAL);
 2853         }
 2854 
 2855         MFC_LOCK_INIT();
 2856         VIF_LOCK_INIT();
 2857 
 2858         mfchashsize = MFCHASHSIZE;
 2859         if (TUNABLE_ULONG_FETCH("net.inet.ip.mfchashsize", &mfchashsize) &&
 2860             !powerof2(mfchashsize)) {
 2861                 printf("WARNING: %s not a power of 2; using default\n",
 2862                     "net.inet.ip.mfchashsize");
 2863                 mfchashsize = MFCHASHSIZE;
 2864         }
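        /*
         * (For example, the table size can be overridden at boot time by
         *  setting net.inet.ip.mfchashsize="1024" in loader.conf(5);
         *  non-power-of-2 values fall back to the MFCHASHSIZE default,
         *  as checked above.)
         */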
 2865 
 2866         pim_squelch_wholepkt = 0;
 2867         TUNABLE_ULONG_FETCH("net.inet.pim.squelch_wholepkt",
 2868             &pim_squelch_wholepkt);
 2869 
 2870         pim_encap_cookie = encap_attach_func(AF_INET, IPPROTO_PIM,
 2871             pim_encapcheck, &in_pim_protosw, NULL);
 2872         if (pim_encap_cookie == NULL) {
 2873                 printf("ip_mroute: unable to attach pim encap\n");
 2874                 VIF_LOCK_DESTROY();
 2875                 MFC_LOCK_DESTROY();
 2876                 MROUTER_LOCK_DESTROY();
 2877                 return (EINVAL);
 2878         }
 2879 
 2880         ip_mcast_src = X_ip_mcast_src;
 2881         ip_mforward = X_ip_mforward;
 2882         ip_mrouter_done = X_ip_mrouter_done;
 2883         ip_mrouter_get = X_ip_mrouter_get;
 2884         ip_mrouter_set = X_ip_mrouter_set;
 2885 
 2886         ip_rsvp_force_done = X_ip_rsvp_force_done;
 2887         ip_rsvp_vif = X_ip_rsvp_vif;
 2888 
 2889         legal_vif_num = X_legal_vif_num;
 2890         mrt_ioctl = X_mrt_ioctl;
 2891         rsvp_input_p = X_rsvp_input;
 2892         break;
 2893 
 2894     case MOD_UNLOAD:
 2895         /*
 2896          * Typically module unload happens after the user-level
 2897          * process has shut down the kernel services (the check
 2898          * below ensures someone can't just yank the module out
 2899          * from under a running process).  But if the module is
 2900          * just loaded and then unloaded w/o starting up a user
 2901          * process we still need to cleanup.
 2902          */
 2903         MROUTER_LOCK();
 2904         if (ip_mrouter_cnt != 0) {
 2905             MROUTER_UNLOCK();
 2906             return (EINVAL);
 2907         }
 2908         ip_mrouter_unloading = 1;
 2909         MROUTER_UNLOCK();
 2910 
 2911         EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
 2912 
 2913         if (pim_encap_cookie) {
 2914             encap_detach(pim_encap_cookie);
 2915             pim_encap_cookie = NULL;
 2916         }
 2917 
 2918         ip_mcast_src = NULL;
 2919         ip_mforward = NULL;
 2920         ip_mrouter_done = NULL;
 2921         ip_mrouter_get = NULL;
 2922         ip_mrouter_set = NULL;
 2923 
 2924         ip_rsvp_force_done = NULL;
 2925         ip_rsvp_vif = NULL;
 2926 
 2927         legal_vif_num = NULL;
 2928         mrt_ioctl = NULL;
 2929         rsvp_input_p = NULL;
 2930 
 2931         VIF_LOCK_DESTROY();
 2932         MFC_LOCK_DESTROY();
 2933         MROUTER_LOCK_DESTROY();
 2934         break;
 2935 
 2936     default:
 2937         return EOPNOTSUPP;
 2938     }
 2939     return 0;
 2940 }
 2941 
 2942 static moduledata_t ip_mroutemod = {
 2943     "ip_mroute",
 2944     ip_mroute_modevent,
 2945     0
 2946 };
 2947 
 2948 DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);
