FreeBSD/Linux Kernel Cross Reference
sys/netinet/in_rmx.c

/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted after a period of disuse, so that a large quantity
 *     of stale routing data is not kept in kernel memory indefinitely.
 *     See in_rtqtimo() below for the exact mechanism.
 */
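
/*
 * Illustrative lifecycle (editor's sketch, inferred from the code
 * below; addresses are hypothetical):
 *
 *  1. A network route, say 10.0.0.0/24, is inserted; in_addroute()
 *     marks it RTF_PRCLONING.
 *  2. The first lookup for 10.0.0.5 clones a host route
 *     (RTF_WASCLONED) so TCP metrics can be kept per destination.
 *  3. When the last reference to that host route drops,
 *     in_closeroute() sets RTPRF_EXPIRING and stamps rmx_expire.
 *  4. in_rtqtimo()/in_rtqkill() delete it once the stamp expires.
 */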

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING  RTF_PROTO3      /* set on routes we manage */

struct in_rtqtimo_ctx {
        struct callout          timo_ch;
        struct netmsg_base      timo_nmsg;
        struct radix_node_head  *timo_rnh;
} __cachealign;

static void     in_rtqtimo(void *);

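/*
 * One timeout context per CPU (editor's note): DragonFly replicates
 * the routing tables per CPU (see rt_tables[cpu][AF_INET] below), so
 * each CPU ages out clones from its own copy of the tree.
 */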
static struct in_rtqtimo_ctx in_rtqtimo_context[MAXCPU];

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
            struct radix_node *treenodes)
{
        struct rtentry *rt = (struct rtentry *)treenodes;
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
        struct in_ifaddr_container *iac;
        struct in_ifaddr *ia;

        /*
         * For IP, mark routes to multicast addresses as such, because
         * it's easy to do and might be useful (though of limited value,
         * since callers can just as easily test the address themselves).
         *
         * For IP, all unicast non-host routes are automatically cloning.
         */
        if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;

        if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
                rt->rt_flags |= RTF_PRCLONING;

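        /*
         * (Editor's note) E.g. a unicast network route such as
         * 10.0.0.0/24 takes the RTF_PRCLONING branch above, while a
         * destination in 224.0.0.0/4 is tagged RTF_MULTICAST instead.
         */
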
        /*
         * For host routes, we make sure that RTF_BROADCAST
         * is set for anything that looks like a broadcast address.
         * This way, we can avoid an expensive call to in_broadcast()
         * in ip_output() most of the time (because the route passed
         * to ip_output() is almost always a host route).
         *
         * For local routes we set RTF_LOCAL, allowing various shortcuts.
         *
         * A cloned network route will point to one of several possible
         * addresses if an interface has aliases, and must be repointed
         * back to the correct address or arp_rtrequest() will not
         * properly detect the local IP.
         */
        if (rt->rt_flags & RTF_HOST) {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                           sin->sin_addr.s_addr) {
                        rt->rt_flags |= RTF_LOCAL;
                } else {
                        LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
                                     ia_hash) {
                                ia = iac->ia;
                                if (sin->sin_addr.s_addr ==
                                    ia->ia_addr.sin_addr.s_addr) {
                                        rt->rt_flags |= RTF_LOCAL;
                                        IFAREF(&ia->ia_ifa);
                                        IFAFREE(rt->rt_ifa);
                                        rt->rt_ifa = &ia->ia_ifa;
                                        rt->rt_ifp = rt->rt_ifa->ifa_ifp;
                                        break;
                                }
                        }
                }
        }

        if (rt->rt_rmx.rmx_mtu != 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
            rt->rt_ifp != NULL)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

        ret = rn_addroute(key, mask, head, treenodes);
        if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
                struct rtentry *oldrt;

                /*
                 * We are trying to add a host route, but can't.
                 * Find out if it is because of an ARP entry and
                 * delete it if so.
                 */
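                /*
                 * (Editor's note) Typical case: a cloned ARP/llinfo
                 * entry for the destination already occupies the slot,
                 * blocking an explicit "route add -host"; removing it
                 * lets the second rn_addroute() attempt succeed.
                 */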
                oldrt = rtpurelookup((struct sockaddr *)sin);
                if (oldrt != NULL) {
                        --oldrt->rt_refcnt;
                        if ((oldrt->rt_flags & RTF_LLINFO) &&
                            (oldrt->rt_flags & RTF_HOST) &&
                            oldrt->rt_gateway &&
                            oldrt->rt_gateway->sa_family == AF_LINK) {
                                rtrequest(RTM_DELETE, rt_key(oldrt),
                                          oldrt->rt_gateway, rt_mask(oldrt),
                                          oldrt->rt_flags, NULL);
                                ret = rn_addroute(key, mask, head, treenodes);
                        }
                }
        }

        /*
         * If the new route has been created successfully, and it is
         * not a multicast/broadcast or cloned route, then we will
         * have to flush the ipflow.  Otherwise, we may end up using
         * the wrong route.
         */
        if (ret != NULL &&
            (rt->rt_flags &
             (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0) {
                ipflow_flush_oncpu();
        }
        return ret;
}

/*
 * This code is the inverse of in_closeroute(): on first reference, if we
 * were managing the route, stop doing so and turn the expiration timer
 * off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
        struct radix_node *rn = rn_match(key, head);
        struct rtentry *rt = (struct rtentry *)rn;

        if (rt != NULL && rt->rt_refcnt == 0) { /* this is first reference */
                if (rt->rt_flags & RTPRF_EXPIRING) {
                        rt->rt_flags &= ~RTPRF_EXPIRING;
                        rt->rt_rmx.rmx_expire = 0;
                }
        }
        return rn;
}

static int rtq_reallyold = 60*60;  /* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;  /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;      /* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");
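
/*
 * These knobs appear under net.inet.ip.  A minimal tuning sketch
 * (editor's note; values are hypothetical):
 *
 *   sysctl net.inet.ip.rtexpire=600     # expire idle clones after 10 min
 *   sysctl net.inet.ip.rtminexpire=10   # floor for automatic trimming
 *   sysctl net.inet.ip.rtmaxcache=256   # trim harder past 256 clones
 */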

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
        struct rtentry *rt = (struct rtentry *)rn;

        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */

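        /*
         * (Editor's note) Only plain cloned host routes are aged out
         * here: ARP/llinfo entries have their own expiry, and a route
         * already marked RTPRF_EXPIRING needs no second stamp.
         */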
        if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;

        if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
                return;

        /*
         * As requested by David Greenman:
         * If rtq_reallyold is 0, just delete the route without
         * waiting for a timeout cycle to kill it.
         */
        if (rtq_reallyold != 0) {
                rt->rt_flags |= RTPRF_EXPIRING;
                rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
        } else {
                /*
                 * Remove route from the radix tree, but defer deallocation
                 * until we return to rtfree().
                 */
                rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
                          rt->rt_flags, &rt);
        }
}

struct rtqk_arg {
        struct radix_node_head *rnh;
        int draining;
        int killed;
        int found;
        int updating;
        time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * routes whose timeout has not yet expired.  When updating, it makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_flags & RTPRF_EXPIRING) {
                ap->found++;
                if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");

                        err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                        rt_mask(rt), rt->rt_flags, NULL);
                        if (err)
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        else
                                ap->killed++;
                } else {
                        if (ap->updating &&
                            (int)(rt->rt_rmx.rmx_expire - time_uptime) >
                             rtq_reallyold) {
                                rt->rt_rmx.rmx_expire = time_uptime +
                                    rtq_reallyold;
                        }
                        ap->nextstop = lmin(ap->nextstop,
                                            rt->rt_rmx.rmx_expire);
                }
        }

        return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only and
 * may be modified from any CPU.  However, they change at such a low
 * frequency that we can ignore the cache-thrashing issue and treat them
 * as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
        struct rtqk_arg arg;
        struct timeval atv;
        static time_t last_adjusted_timeout = 0;
        struct in_rtqtimo_ctx *ctx = &in_rtqtimo_context[mycpuid];
        struct radix_node_head *rnh = ctx->timo_rnh;

        /* Reply ASAP */
        crit_enter();
        lwkt_replymsg(&nmsg->lmsg, 0);
        crit_exit();

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_uptime + rtq_timeout;
        arg.draining = arg.updating = 0;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
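        /*
         * (Editor's note) Each trim multiplies rtq_reallyold by 2/3:
         * from the default 3600s this gives 2400, 1600, 1066, ...
         * seconds, bottoming out at rtq_minreallyold.
         */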
        if ((arg.found - arg.killed > rtq_toomany) &&
            (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
            rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if (rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }

                last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        }

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop - time_uptime;
        if ((int)atv.tv_sec < 1) {              /* time shift safety */
                atv.tv_sec = 1;
                arg.nextstop = time_uptime + atv.tv_sec;
        }
        if ((int)atv.tv_sec > rtq_timeout) {    /* time shift safety */
                atv.tv_sec = rtq_timeout;
                arg.nextstop = time_uptime + atv.tv_sec;
        }
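        /*
         * (Editor's note) Net effect of the two clamps above: the
         * callout is always rearmed within [1, rtq_timeout] seconds,
         * even across clock steps.
         */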
        callout_reset(&ctx->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

static void
in_rtqtimo(void *arg __unused)
{
        int cpuid = mycpuid;
        struct lwkt_msg *lmsg = &in_rtqtimo_context[cpuid].timo_nmsg.lmsg;

        crit_enter();
        if (lmsg->ms_flags & MSGF_DONE)
                lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
        crit_exit();
}

void
in_rtqdrain(void)
{
        struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
        struct rtqk_arg arg;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        crit_exit();
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
        struct radix_node_head *rnh;
        struct in_rtqtimo_ctx *ctx;
        int cpuid = mycpuid;

        if (!rn_inithead(head, rn_cpumaskhead(cpuid), off))
                return 0;

        if (head != (void **)&rt_tables[cpuid][AF_INET]) /* BOGUS! */
                return 1;       /* only do this for the real routing table */

        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matchroute;
        rnh->rnh_close = in_closeroute;

        ctx = &in_rtqtimo_context[cpuid];
        ctx->timo_rnh = rnh;
        callout_init_mp(&ctx->timo_ch);
        netmsg_init(&ctx->timo_nmsg, NULL, &netisr_adone_rport, 0,
                    in_rtqtimo_dispatch);

        in_rtqtimo(NULL);       /* kick off timeout first time */
        return 1;
}
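
/*
 * (Editor's note) in_inithead() runs once per CPU; only each CPU's
 * "real" AF_INET table gets the hooks and the expiry callout above,
 * as the rt_tables check enforces.
 */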

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
        struct radix_node_head *rnh;
        struct ifaddr *ifa;
        int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
        struct in_ifadown_arg *ap = xap;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
                 * We need to disable the automatic prune that happens
                 * in this case in rtrequest() because it will blow
                 * away the pointers that rn_walktree() needs in order
                 * to continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * so that behavior is not needed there.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
                err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                rt_mask(rt), rt->rt_flags, NULL);
                if (err)
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
        }
        return 0;
}

struct netmsg_ifadown {
        struct netmsg_base      base;
        struct ifaddr           *ifa;
        int                     del;
};

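/*
 * (Editor's note) The ifadown message visits each per-cpu AF_INET tree
 * in turn: each CPU walks its own table, then forwards the message to
 * the next CPU via lwkt_forwardmsg(); the last CPU replies.
 */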
static void
in_ifadown_dispatch(netmsg_t msg)
{
        struct netmsg_ifadown *rmsg = (void *)msg;
        struct radix_node_head *rnh;
        struct ifaddr *ifa = rmsg->ifa;
        struct in_ifadown_arg arg;
        int nextcpu, cpu;

        cpu = mycpuid;

        arg.rnh = rnh = rt_tables[cpu][AF_INET];
        arg.ifa = ifa;
        arg.del = rmsg->del;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
        ifa->ifa_flags &= ~IFA_ROUTE;

        nextcpu = cpu + 1;
        if (nextcpu < ncpus)
                lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
        else
                lwkt_replymsg(&rmsg->base.lmsg, 0);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
        struct netmsg_ifadown msg;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return 1;

        /*
         * XXX individual requests are not independently chained,
         * which means that the per-cpu route tables will not be
         * consistent in the middle of the operation.  If routes
         * related to the interface are manipulated while we are
         * doing this, the inconsistency could trigger a panic.
         */
        netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
            in_ifadown_dispatch);
        msg.ifa = ifa;
        msg.del = delete;
        rt_domsg_global(&msg.base);

        return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
        if (ifa->ifa_ifp->if_type == IFT_CARP)
                return 0;
#endif
        return in_ifadown_force(ifa, delete);
}
