FreeBSD/Linux Kernel Cross Reference
sys/netinet/in_rmx.c

    1 /*-
    2  * Copyright 1994, 1995 Massachusetts Institute of Technology
    3  *
    4  * Permission to use, copy, modify, and distribute this software and
    5  * its documentation for any purpose and without fee is hereby
    6  * granted, provided that both the above copyright notice and this
    7  * permission notice appear in all copies, that both the above
    8  * copyright notice and this permission notice appear in all
    9  * supporting documentation, and that the name of M.I.T. not be used
   10  * in advertising or publicity pertaining to distribution of the
   11  * software without specific, written prior permission.  M.I.T. makes
   12  * no representations about the suitability of this software for any
   13  * purpose.  It is provided "as is" without express or implied
   14  * warranty.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
   17  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
   18  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
   20  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   23  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
   24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
   25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * This code does two things necessary for the enhanced TCP metrics to
   32  * function in a useful manner:
   33  *  1) It marks all non-host routes as `cloning', thus ensuring that
   34  *     every actual reference to such a route actually gets turned
   35  *     into a reference to a host route to the specific destination
   36  *     requested.
   37  *  2) When such routes lose all their references, it arranges for them
    38  *     to be deleted once they have gone unused for a while, so that
   39  *     a large quantity of stale routing data is not kept in kernel memory
   40  *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
   41  */
   42 
   43 #include <sys/cdefs.h>
   44 __FBSDID("$FreeBSD: releng/9.0/sys/netinet/in_rmx.c 215701 2010-11-22 19:32:54Z dim $");
   45 
   46 #include <sys/param.h>
   47 #include <sys/systm.h>
   48 #include <sys/kernel.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/socket.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/syslog.h>
   53 #include <sys/callout.h>
   54 
   55 #include <net/if.h>
   56 #include <net/route.h>
   57 #include <net/vnet.h>
   58 
   59 #include <netinet/in.h>
   60 #include <netinet/in_var.h>
   61 #include <netinet/ip_var.h>
   62 
   63 extern int      in_inithead(void **head, int off);
   64 #ifdef VIMAGE
   65 extern int      in_detachhead(void **head, int off);
   66 #endif
   67 
   68 #define RTPRF_OURS              RTF_PROTO3      /* set on routes we manage */
   69 
   70 /*
   71  * Do what we need to do when inserting a route.
   72  */
   73 static struct radix_node *
   74 in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
   75     struct radix_node *treenodes)
   76 {
   77         struct rtentry *rt = (struct rtentry *)treenodes;
   78         struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
   79 
   80         RADIX_NODE_HEAD_WLOCK_ASSERT(head);
   81         /*
   82          * A little bit of help for both IP output and input:
   83          *   For host routes, we make sure that RTF_BROADCAST
   84          *   is set for anything that looks like a broadcast address.
   85          *   This way, we can avoid an expensive call to in_broadcast()
   86          *   in ip_output() most of the time (because the route passed
   87          *   to ip_output() is almost always a host route).
   88          *
   89          *   We also do the same for local addresses, with the thought
   90          *   that this might one day be used to speed up ip_input().
   91          *
   92          * We also mark routes to multicast addresses as such, because
   93          * it's easy to do and might be useful (but this is much more
   94          * dubious since it's so easy to inspect the address).
   95          */
   96         if (rt->rt_flags & RTF_HOST) {
   97                 if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
   98                         rt->rt_flags |= RTF_BROADCAST;
   99                 } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
  100                     sin->sin_addr.s_addr) {
  101                         rt->rt_flags |= RTF_LOCAL;
  102                 }
  103         }
  104         if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
  105                 rt->rt_flags |= RTF_MULTICAST;
  106 
  107         if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
  108                 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
  109 
  110         return (rn_addroute(v_arg, n_arg, head, treenodes));
  111 }
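
The flag caching above mainly benefits the output path: once a host route carries RTF_BROADCAST or RTF_LOCAL, code such as ip_output() can test a bit in rt_flags instead of rescanning interface addresses. A minimal sketch of that kind of fast path follows; dst_is_broadcast() is hypothetical and not part of this file:

        /*
         * Hypothetical helper, for illustration only: use the flags cached
         * by in_addroute() and fall back to in_broadcast() when no host
         * route is available.
         */
        static int
        dst_is_broadcast(struct rtentry *rt, struct in_addr dst, struct ifnet *ifp)
        {

                if (rt != NULL && (rt->rt_flags & RTF_HOST))
                        return ((rt->rt_flags & RTF_BROADCAST) != 0);
                return (in_broadcast(dst, ifp));
        }
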
  112 
  113 /*
  114  * This code is the inverse of in_clsroute: on first reference, if we
  115  * were managing the route, stop doing so and set the expiration timer
  116  * back off again.
  117  */
  118 static struct radix_node *
  119 in_matroute(void *v_arg, struct radix_node_head *head)
  120 {
  121         struct radix_node *rn = rn_match(v_arg, head);
  122         struct rtentry *rt = (struct rtentry *)rn;
  123 
  124         if (rt) {
  125                 RT_LOCK(rt);
  126                 if (rt->rt_flags & RTPRF_OURS) {
  127                         rt->rt_flags &= ~RTPRF_OURS;
  128                         rt->rt_rmx.rmx_expire = 0;
  129                 }
  130                 RT_UNLOCK(rt);
  131         }
  132         return rn;
  133 }
  134 
  135 static VNET_DEFINE(int, rtq_reallyold) = 60*60; /* one hour is "really old" */
  136 #define V_rtq_reallyold         VNET(rtq_reallyold)
  137 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
  138     &VNET_NAME(rtq_reallyold), 0,
  139     "Default expiration time on dynamically learned routes");
  140 
  141 /* never automatically crank down to less */
  142 static VNET_DEFINE(int, rtq_minreallyold) = 10;
  143 #define V_rtq_minreallyold      VNET(rtq_minreallyold)
  144 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
  145     &VNET_NAME(rtq_minreallyold), 0,
  146     "Minimum time to attempt to hold onto dynamically learned routes");
  147 
  148 /* 128 cached routes is "too many" */
  149 static VNET_DEFINE(int, rtq_toomany) = 128;
  150 #define V_rtq_toomany           VNET(rtq_toomany)
  151 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
  152     &VNET_NAME(rtq_toomany), 0,
  153     "Upper limit on dynamically learned routes");
  154 
  155 /*
   156  * On last reference drop, mark the route as belonging to us so that it can be
  157  * timed out.
  158  */
  159 static void
  160 in_clsroute(struct radix_node *rn, struct radix_node_head *head)
  161 {
  162         struct rtentry *rt = (struct rtentry *)rn;
  163 
  164         RT_LOCK_ASSERT(rt);
  165 
  166         if (!(rt->rt_flags & RTF_UP))
  167                 return;                 /* prophylactic measures */
  168 
  169         if (rt->rt_flags & RTPRF_OURS)
  170                 return;
  171 
  172         if (!(rt->rt_flags & RTF_DYNAMIC))
  173                 return;
  174 
  175         /*
  176          * If rtq_reallyold is 0, just delete the route without
  177          * waiting for a timeout cycle to kill it.
  178          */
  179         if (V_rtq_reallyold != 0) {
  180                 rt->rt_flags |= RTPRF_OURS;
  181                 rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
  182         } else {
  183                 rtexpunge(rt);
  184         }
  185 }
  186 
  187 struct rtqk_arg {
  188         struct radix_node_head *rnh;
  189         int draining;
  190         int killed;
  191         int found;
  192         int updating;
  193         time_t nextstop;
  194 };
  195 
  196 /*
  197  * Get rid of old routes.  When draining, this deletes everything, even when
  198  * the timeout is not expired yet.  When updating, this makes sure that
  199  * nothing has a timeout longer than the current value of rtq_reallyold.
  200  */
  201 static int
  202 in_rtqkill(struct radix_node *rn, void *rock)
  203 {
  204         struct rtqk_arg *ap = rock;
  205         struct rtentry *rt = (struct rtentry *)rn;
  206         int err;
  207 
  208         RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);
  209 
  210         if (rt->rt_flags & RTPRF_OURS) {
  211                 ap->found++;
  212 
  213                 if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
  214                         if (rt->rt_refcnt > 0)
  215                                 panic("rtqkill route really not free");
  216 
  217                         err = in_rtrequest(RTM_DELETE,
  218                                         (struct sockaddr *)rt_key(rt),
  219                                         rt->rt_gateway, rt_mask(rt),
  220                                         rt->rt_flags | RTF_RNH_LOCKED, 0,
  221                                         rt->rt_fibnum);
  222                         if (err) {
  223                                 log(LOG_WARNING, "in_rtqkill: error %d\n", err);
  224                         } else {
  225                                 ap->killed++;
  226                         }
  227                 } else {
  228                         if (ap->updating &&
  229                             (rt->rt_rmx.rmx_expire - time_uptime >
  230                              V_rtq_reallyold)) {
  231                                 rt->rt_rmx.rmx_expire =
  232                                     time_uptime + V_rtq_reallyold;
  233                         }
  234                         ap->nextstop = lmin(ap->nextstop,
  235                                             rt->rt_rmx.rmx_expire);
  236                 }
  237         }
  238 
  239         return 0;
  240 }
  241 
  242 #define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
  243 static VNET_DEFINE(int, rtq_timeout) = RTQ_TIMEOUT;
  244 static VNET_DEFINE(struct callout, rtq_timer);
  245 
  246 #define V_rtq_timeout           VNET(rtq_timeout)
  247 #define V_rtq_timer             VNET(rtq_timer)
  248 
  249 static void in_rtqtimo_one(void *rock);
  250 
  251 static void
  252 in_rtqtimo(void *rock)
  253 {
  254         CURVNET_SET((struct vnet *) rock);
  255         int fibnum;
  256         void *newrock;
  257         struct timeval atv;
  258 
  259         for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
  260                 newrock = rt_tables_get_rnh(fibnum, AF_INET);
  261                 if (newrock != NULL)
  262                         in_rtqtimo_one(newrock);
  263         }
  264         atv.tv_usec = 0;
  265         atv.tv_sec = V_rtq_timeout;
  266         callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
  267         CURVNET_RESTORE();
  268 }
  269 
  270 static void
  271 in_rtqtimo_one(void *rock)
  272 {
  273         struct radix_node_head *rnh = rock;
  274         struct rtqk_arg arg;
  275         static time_t last_adjusted_timeout = 0;
  276 
  277         arg.found = arg.killed = 0;
  278         arg.rnh = rnh;
  279         arg.nextstop = time_uptime + V_rtq_timeout;
  280         arg.draining = arg.updating = 0;
  281         RADIX_NODE_HEAD_LOCK(rnh);
  282         rnh->rnh_walktree(rnh, in_rtqkill, &arg);
  283         RADIX_NODE_HEAD_UNLOCK(rnh);
  284 
  285         /*
  286          * Attempt to be somewhat dynamic about this:
  287          * If there are ``too many'' routes sitting around taking up space,
  288          * then crank down the timeout, and see if we can't make some more
  289          * go away.  However, we make sure that we will never adjust more
  290          * than once in rtq_timeout seconds, to keep from cranking down too
  291          * hard.
  292          */
  293         if ((arg.found - arg.killed > V_rtq_toomany) &&
  294             (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
  295             V_rtq_reallyold > V_rtq_minreallyold) {
  296                 V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
  297                 if (V_rtq_reallyold < V_rtq_minreallyold) {
  298                         V_rtq_reallyold = V_rtq_minreallyold;
  299                 }
  300 
  301                 last_adjusted_timeout = time_uptime;
  302 #ifdef DIAGNOSTIC
  303                 log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
  304                     V_rtq_reallyold);
  305 #endif
  306                 arg.found = arg.killed = 0;
  307                 arg.updating = 1;
  308                 RADIX_NODE_HEAD_LOCK(rnh);
  309                 rnh->rnh_walktree(rnh, in_rtqkill, &arg);
  310                 RADIX_NODE_HEAD_UNLOCK(rnh);
  311         }
  312 
  313 }
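
When the table stays over rtq_toomany, the adjustment above multiplies rtq_reallyold by 2/3 at most once per rtq_timeout interval, so the expiry window shrinks geometrically toward rtq_minreallyold. A standalone, purely illustrative replay of that arithmetic with the default values:

        #include <stdio.h>

        /* Illustrative only: replay the 2/3 decay applied to rtq_reallyold. */
        int
        main(void)
        {
                int reallyold = 60 * 60;        /* default: one hour */
                int minreallyold = 10;          /* floor, as above */

                while (reallyold > minreallyold) {
                        reallyold = 2 * reallyold / 3;
                        if (reallyold < minreallyold)
                                reallyold = minreallyold;
                        printf("rtq_reallyold -> %d\n", reallyold);
                }
                return (0);
        }
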
  314 
  315 void
  316 in_rtqdrain(void)
  317 {
  318         VNET_ITERATOR_DECL(vnet_iter);
  319         struct radix_node_head *rnh;
  320         struct rtqk_arg arg;
  321         int     fibnum;
  322 
  323         VNET_LIST_RLOCK_NOSLEEP();
  324         VNET_FOREACH(vnet_iter) {
  325                 CURVNET_SET(vnet_iter);
  326 
  327                 for ( fibnum = 0; fibnum < rt_numfibs; fibnum++) {
  328                         rnh = rt_tables_get_rnh(fibnum, AF_INET);
  329                         arg.found = arg.killed = 0;
  330                         arg.rnh = rnh;
  331                         arg.nextstop = 0;
  332                         arg.draining = 1;
  333                         arg.updating = 0;
  334                         RADIX_NODE_HEAD_LOCK(rnh);
  335                         rnh->rnh_walktree(rnh, in_rtqkill, &arg);
  336                         RADIX_NODE_HEAD_UNLOCK(rnh);
  337                 }
  338                 CURVNET_RESTORE();
  339         }
  340         VNET_LIST_RUNLOCK_NOSLEEP();
  341 }
  342 
  343 static int _in_rt_was_here;
  344 /*
  345  * Initialize our routing tree.
  346  */
  347 int
  348 in_inithead(void **head, int off)
  349 {
  350         struct radix_node_head *rnh;
  351 
  352         /* XXX MRT
  353          * This can be called from vfs_export.c too in which case 'off'
  354          * will be 0. We know the correct value so just use that and
  355          * return directly if it was 0.
  356          * This is a hack that replaces an even worse hack on a bad hack
  357          * on a bad design. After RELENG_7 this should be fixed but that
  358          * will change the ABI, so for now do it this way.
  359          */
  360         if (!rn_inithead(head, 32))
  361                 return 0;
  362 
  363         if (off == 0)           /* XXX MRT  see above */
  364                 return 1;       /* only do the rest for a real routing table */
  365 
  366         rnh = *head;
  367         rnh->rnh_addaddr = in_addroute;
  368         rnh->rnh_matchaddr = in_matroute;
  369         rnh->rnh_close = in_clsroute;
  370         if (_in_rt_was_here == 0 ) {
  371                 callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
  372                 callout_reset(&V_rtq_timer, 1, in_rtqtimo, curvnet);
  373                 _in_rt_was_here = 1;
  374         }
  375         return 1;
  376 }
  377 
  378 #ifdef VIMAGE
  379 int
  380 in_detachhead(void **head, int off)
  381 {
  382 
  383         callout_drain(&V_rtq_timer);
  384         return (1);
  385 }
  386 #endif
  387 
  388 /*
  389  * This zaps old routes when the interface goes down or interface
  390  * address is deleted.  In the latter case, it deletes static routes
  391  * that point to this address.  If we don't do this, we may end up
  392  * using the old address in the future.  The ones we always want to
  393  * get rid of are things like ARP entries, since the user might down
  394  * the interface, walk over to a completely different network, and
  395  * plug back in.
  396  */
  397 struct in_ifadown_arg {
  398         struct ifaddr *ifa;
  399         int del;
  400 };
  401 
  402 static int
  403 in_ifadownkill(struct radix_node *rn, void *xap)
  404 {
  405         struct in_ifadown_arg *ap = xap;
  406         struct rtentry *rt = (struct rtentry *)rn;
  407 
  408         RT_LOCK(rt);
  409         if (rt->rt_ifa == ap->ifa &&
  410             (ap->del || !(rt->rt_flags & RTF_STATIC))) {
  411                 /*
   412                  * Acquire a reference so that it can later be freed,
   413                  * as the refcount would otherwise be 0 here (at least
   414                  * in the ap->del case).
  415                  */
  416                 RT_ADDREF(rt);
  417                 /*
  418                  * Disconnect it from the tree and permit protocols
  419                  * to cleanup.
  420                  */
  421                 rtexpunge(rt);
  422                 /*
   423                  * At this point it is an rttrash node, and if the
   424                  * reference taken above is the only one we must free
   425                  * it; otherwise no one will have a pointer to it and
   426                  * the rtentry will be leaked forever.
   427                  * If someone else still holds a reference, we are
   428                  * fine, as we only decrement the refcount.  In that
   429                  * case, if the other entity calls RT_REMREF, the
   430                  * entry will still leak, but at least we tried.
  431                  */
  432                 RTFREE_LOCKED(rt);
  433                 return (0);
  434         }
  435         RT_UNLOCK(rt);
  436         return 0;
  437 }
  438 
  439 int
  440 in_ifadown(struct ifaddr *ifa, int delete)
  441 {
  442         struct in_ifadown_arg arg;
  443         struct radix_node_head *rnh;
  444         int     fibnum;
  445 
  446         if (ifa->ifa_addr->sa_family != AF_INET)
  447                 return 1;
  448 
  449         for ( fibnum = 0; fibnum < rt_numfibs; fibnum++) {
  450                 rnh = rt_tables_get_rnh(fibnum, AF_INET);
  451                 arg.ifa = ifa;
  452                 arg.del = delete;
  453                 RADIX_NODE_HEAD_LOCK(rnh);
  454                 rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
  455                 RADIX_NODE_HEAD_UNLOCK(rnh);
  456                 ifa->ifa_flags &= ~IFA_ROUTE;           /* XXXlocking? */
  457         }
  458         return 0;
  459 }
  460 
  461 /*
   462  * inet versions of rt functions.  These have FIB extensions and,
   463  * for now, will just reference the _fib variants.
   464  * Eventually this order will be reversed.
  465  */
  466 void
  467 in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
  468 {
  469         rtalloc_ign_fib(ro, ignflags, fibnum);
  470 }
  471 
  472 int
  473 in_rtrequest( int req,
  474         struct sockaddr *dst,
  475         struct sockaddr *gateway,
  476         struct sockaddr *netmask,
  477         int flags,
  478         struct rtentry **ret_nrt,
  479         u_int fibnum)
  480 {
  481         return (rtrequest_fib(req, dst, gateway, netmask, 
  482             flags, ret_nrt, fibnum));
  483 }
  484 
  485 struct rtentry *
  486 in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
  487 {
  488         return (rtalloc1_fib(dst, report, ignflags, fibnum));
  489 }
  490 
  491 void
  492 in_rtredirect(struct sockaddr *dst,
  493         struct sockaddr *gateway,
  494         struct sockaddr *netmask,
  495         int flags,
  496         struct sockaddr *src,
  497         u_int fibnum)
  498 {
  499         rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
  500 }
  501  
  502 void
  503 in_rtalloc(struct route *ro, u_int fibnum)
  504 {
  505         rtalloc_ign_fib(ro, 0UL, fibnum);
  506 }
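
As a usage illustration only (no such caller exists in this file), a lookup through these wrappers follows the usual struct route pattern: fill in ro_dst, call the allocator for the desired FIB, and release the reference when done. The function name and destination below are hypothetical:

        /* Hypothetical caller: look up a route to 127.0.0.1 in FIB 0. */
        static void
        example_lookup(void)
        {
                struct route ro;
                struct sockaddr_in *dst;

                bzero(&ro, sizeof(ro));
                dst = (struct sockaddr_in *)&ro.ro_dst;
                dst->sin_family = AF_INET;
                dst->sin_len = sizeof(*dst);
                dst->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

                in_rtalloc(&ro, 0);             /* sets ro.ro_rt on success */
                if (ro.ro_rt != NULL) {
                        /* ... use the route ... */
                        RTFREE(ro.ro_rt);       /* drop the reference */
                }
        }
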
  507 
  508 #if 0
  509 int      in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
  510 int      in_rtioctl(u_long, caddr_t, u_int);
  511 int      in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
  512 #endif
  513 
  514 
