FreeBSD/Linux Kernel Cross Reference
sys/net/flowtable.c


    1 /*-
    2  * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
    3  * Copyright (c) 2008-2010, BitGravity Inc.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright notice,
   10  *     this list of conditions and the following disclaimer.
   11  *
   12  *  2. Neither the name of the BitGravity Corporation nor the names of its
   13  *     contributors may be used to endorse or promote products derived from
   14  *     this software without specific prior written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26  * POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include "opt_route.h"
   30 #include "opt_mpath.h"
   31 #include "opt_ddb.h"
   32 #include "opt_inet.h"
   33 #include "opt_inet6.h"
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/11.1/sys/net/flowtable.c 302378 2016-07-06 17:46:49Z nwhitehorn $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/types.h>
   40 #include <sys/bitstring.h>
   41 #include <sys/condvar.h>
   42 #include <sys/callout.h>
   43 #include <sys/hash.h>
   44 #include <sys/kernel.h>
   45 #include <sys/kthread.h>
   46 #include <sys/limits.h>
   47 #include <sys/malloc.h>
   48 #include <sys/mbuf.h>
   49 #include <sys/pcpu.h>
   50 #include <sys/proc.h>
   51 #include <sys/queue.h>
   52 #include <sys/sbuf.h>
   53 #include <sys/sched.h>
   54 #include <sys/smp.h>
   55 #include <sys/socket.h>
   56 #include <sys/syslog.h>
   57 #include <sys/sysctl.h>
   58 #include <vm/uma.h>
   59 
   60 #include <net/if.h>
   61 #include <net/if_llatbl.h>
   62 #include <net/if_var.h>
   63 #include <net/route.h>
   64 #include <net/flowtable.h>
   65 #include <net/vnet.h>
   66 
   67 #include <netinet/in.h>
   68 #include <netinet/in_systm.h>
   69 #include <netinet/in_var.h>
   70 #include <netinet/if_ether.h>
   71 #include <netinet/ip.h>
   72 #ifdef INET6
   73 #include <netinet/ip6.h>
   74 #endif
   75 #ifdef FLOWTABLE_HASH_ALL
   76 #include <netinet/tcp.h>
   77 #include <netinet/udp.h>
   78 #include <netinet/sctp.h>
   79 #endif
   80 
   81 #include <ddb/ddb.h>
   82 
   83 #ifdef  FLOWTABLE_HASH_ALL
   84 #define KEY_PORTS       (sizeof(uint16_t) * 2)
   85 #define KEY_ADDRS       2
   86 #else
   87 #define KEY_PORTS       0
   88 #define KEY_ADDRS       1
   89 #endif
   90 
   91 #ifdef  INET6
   92 #define KEY_ADDR_LEN    sizeof(struct in6_addr)
   93 #else
   94 #define KEY_ADDR_LEN    sizeof(struct in_addr)
   95 #endif
   96 
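       /*
        * KEYLEN is the flow key length in 32-bit words: with FLOWTABLE_HASH_ALL
        * it is (16 * 2 + 4) / 4 = 9 words on an INET6 kernel (both addresses
        * plus the two ports) and 3 words on an INET-only kernel; without it
        * only the destination address is keyed, giving 4 words or 1 word
        * respectively.
        */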
   97 #define KEYLEN  ((KEY_ADDR_LEN * KEY_ADDRS + KEY_PORTS) / sizeof(uint32_t))
   98 
   99 struct flentry {
  100         uint32_t                f_hash;         /* hash flowing forward */
  101         uint32_t                f_key[KEYLEN];  /* address(es and ports) */
  102         uint32_t                f_uptime;       /* uptime at last access */
  103         uint16_t                f_fibnum;       /* fib index */
  104 #ifdef FLOWTABLE_HASH_ALL
  105         uint8_t                 f_proto;        /* protocol */
  106         uint8_t                 f_flags;        /* stale? */
  107 #define FL_STALE                1
  108 #endif
  109         SLIST_ENTRY(flentry)    f_next;         /* pointer to collision entry */
  110         struct rtentry          *f_rt;          /* rtentry for flow */
  111         struct llentry          *f_lle;         /* llentry for flow */
  112 };
  113 #undef KEYLEN
  114 
  115 SLIST_HEAD(flist, flentry);
  116 /* Make sure we can use pcpu_zone_ptr for struct flist. */
  117 CTASSERT(sizeof(struct flist) == sizeof(void *));
  118 
  119 struct flowtable {
  120         counter_u64_t   *ft_stat;
  121         int             ft_size;
  122         /*
  123          * ft_table is a malloc(9)ed array of pointers.  Pointers point to
  124          * memory from UMA_ZONE_PCPU zone.
  125          * ft_masks is per-cpu pointer itself.  Each instance points
  126          * to a malloc(9)ed bitset, that is private to corresponding CPU.
  127          */
  128         struct flist    **ft_table;
  129         bitstr_t        **ft_masks;
  130         bitstr_t        *ft_tmpmask;
  131 };
  132 
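       /*
        * ft_stat points to an array of per-CPU counters laid out field for
        * field like struct flowtable_stat, so a statistic is addressed by its
        * byte offset in that structure divided by the size of one counter.
        */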
  133 #define FLOWSTAT_ADD(ft, name, v)       \
  134         counter_u64_add((ft)->ft_stat[offsetof(struct flowtable_stat, name) / sizeof(uint64_t)], (v))
  135 #define FLOWSTAT_INC(ft, name)  FLOWSTAT_ADD(ft, name, 1)
  136 
  137 static struct proc *flowcleanerproc;
  138 static uint32_t flow_hashjitter;
  139 
  140 static struct cv        flowclean_f_cv;
  141 static struct cv        flowclean_c_cv;
  142 static struct mtx       flowclean_lock;
  143 static uint32_t         flowclean_cycles;
  144 
  145 /*
  146  * TODO:
  147  * - add sysctls to resize && flush flow tables
  148  * - Add per flowtable sysctls for statistics and configuring timeouts
  149  * - add saturation counter to rtentry to support per-packet load-balancing
  150  *   add flag to indicate round-robin flow, add list lookup from head
   151  *   for flows
  152  * - add sysctl / device node / syscall to support exporting and importing
  153  *   of flows with flag to indicate that a flow was imported so should
  154  *   not be considered for auto-cleaning
  155  * - support explicit connection state (currently only ad-hoc for DSR)
  156  * - idetach() cleanup for options VIMAGE builds.
  157  */
  158 #ifdef INET
  159 static VNET_DEFINE(struct flowtable, ip4_ft);
  160 #define V_ip4_ft        VNET(ip4_ft)
  161 #endif
  162 #ifdef INET6
  163 static VNET_DEFINE(struct flowtable, ip6_ft);
  164 #define V_ip6_ft        VNET(ip6_ft)
  165 #endif
  166 
  167 static uma_zone_t flow_zone;
  168 
  169 static VNET_DEFINE(int, flowtable_enable) = 1;
  170 #define V_flowtable_enable              VNET(flowtable_enable)
  171 
  172 static SYSCTL_NODE(_net, OID_AUTO, flowtable, CTLFLAG_RD, NULL,
  173     "flowtable");
  174 SYSCTL_INT(_net_flowtable, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
  175     &VNET_NAME(flowtable_enable), 0, "enable flowtable caching.");
  176 SYSCTL_UMA_MAX(_net_flowtable, OID_AUTO, maxflows, CTLFLAG_RW,
  177     &flow_zone, "Maximum number of flows allowed");
  178 
  179 static MALLOC_DEFINE(M_FTABLE, "flowtable", "flowtable hashes and bitstrings");
  180 
  181 static struct flentry *
  182 flowtable_lookup_common(struct flowtable *, uint32_t *, int, uint32_t);
  183 
  184 #ifdef INET
  185 static struct flentry *
  186 flowtable_lookup_ipv4(struct mbuf *m, struct route *ro)
  187 {
  188         struct flentry *fle;
  189         struct sockaddr_in *sin;
  190         struct ip *ip;
  191         uint32_t fibnum;
  192 #ifdef FLOWTABLE_HASH_ALL
  193         uint32_t key[3];
  194         int iphlen;
  195         uint16_t sport, dport;
  196         uint8_t proto;
  197 #endif
  198 
  199         ip = mtod(m, struct ip *);
  200 
  201         if (ip->ip_src.s_addr == ip->ip_dst.s_addr ||
  202             (ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
  203             (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
  204                 return (NULL);
  205 
  206         fibnum = M_GETFIB(m);
  207 
  208 #ifdef FLOWTABLE_HASH_ALL
  209         iphlen = ip->ip_hl << 2;
  210         proto = ip->ip_p;
  211 
  212         switch (proto) {
  213         case IPPROTO_TCP: {
  214                 struct tcphdr *th;
  215 
  216                 th = (struct tcphdr *)((char *)ip + iphlen);
  217                 sport = th->th_sport;
  218                 dport = th->th_dport;
  219                 if (th->th_flags & (TH_RST|TH_FIN))
  220                         fibnum |= (FL_STALE << 24);
  221                 break;
  222         }
  223         case IPPROTO_UDP: {
  224                 struct udphdr *uh;
  225 
  226                 uh = (struct udphdr *)((char *)ip + iphlen);
  227                 sport = uh->uh_sport;
  228                 dport = uh->uh_dport;
  229                 break;
  230         }
  231         case IPPROTO_SCTP: {
  232                 struct sctphdr *sh;
  233 
  234                 sh = (struct sctphdr *)((char *)ip + iphlen);
  235                 sport = sh->src_port;
  236                 dport = sh->dest_port;
  237                 /* XXXGL: handle stale? */
  238                 break;
  239         }
  240         default:
  241                 sport = dport = 0;
  242                 break;
  243         }
  244 
  245         key[0] = ip->ip_dst.s_addr;
  246         key[1] = ip->ip_src.s_addr;
  247         key[2] = (dport << 16) | sport;
  248         fibnum |= proto << 16;
  249 
  250         fle = flowtable_lookup_common(&V_ip4_ft, key, 3 * sizeof(uint32_t),
  251             fibnum);
  252 
  253 #else   /* !FLOWTABLE_HASH_ALL */
  254 
  255         fle = flowtable_lookup_common(&V_ip4_ft, (uint32_t *)&ip->ip_dst,
  256             sizeof(struct in_addr), fibnum);
  257 
  258 #endif  /* FLOWTABLE_HASH_ALL */
  259 
  260         if (fle == NULL)
  261                 return (NULL);
  262 
  263         sin = (struct sockaddr_in *)&ro->ro_dst;
  264         sin->sin_family = AF_INET;
  265         sin->sin_len = sizeof(*sin);
  266         sin->sin_addr = ip->ip_dst;
  267 
  268         return (fle);
  269 }
  270 #endif /* INET */
  271 
  272 #ifdef INET6
  273 /*
   274  * PULLUP_TO(len, p, T) checks that len + sizeof(T) bytes are contiguous
   275  * in the first mbuf (the enclosing function returns NULL if not), then
   276  * sets p to point at the offset "len" in the mbuf. WARNING: the pointer
   277  * might become stale after other pullups (but we never use it this way).
  278  */
  279 #define PULLUP_TO(_len, p, T)                                           \
  280 do {                                                                    \
  281         int x = (_len) + sizeof(T);                                     \
  282         if ((m)->m_len < x)                                             \
  283                 return (NULL);                                          \
  284         p = (mtod(m, char *) + (_len));                                 \
  285 } while (0)
  286 
  287 #define TCP(p)          ((struct tcphdr *)(p))
  288 #define SCTP(p)         ((struct sctphdr *)(p))
  289 #define UDP(p)          ((struct udphdr *)(p))
  290 
  291 static struct flentry *
  292 flowtable_lookup_ipv6(struct mbuf *m, struct route *ro)
  293 {
  294         struct flentry *fle;
  295         struct sockaddr_in6 *sin6;
  296         struct ip6_hdr *ip6;
  297         uint32_t fibnum;
  298 #ifdef FLOWTABLE_HASH_ALL
  299         uint32_t key[9];
  300         void *ulp;
  301         int hlen;
  302         uint16_t sport, dport;
  303         u_short offset;
  304         uint8_t proto;
  305 #else
  306         uint32_t key[4];
  307 #endif
  308 
  309         ip6 = mtod(m, struct ip6_hdr *);
  310         if (in6_localaddr(&ip6->ip6_dst))
  311                 return (NULL);
  312 
  313         fibnum = M_GETFIB(m);
  314 
  315 #ifdef  FLOWTABLE_HASH_ALL
  316         hlen = sizeof(struct ip6_hdr);
  317         proto = ip6->ip6_nxt;
  318         offset = sport = dport = 0;
  319         ulp = NULL;
  320         while (ulp == NULL) {
  321                 switch (proto) {
  322                 case IPPROTO_ICMPV6:
  323                 case IPPROTO_OSPFIGP:
  324                 case IPPROTO_PIM:
  325                 case IPPROTO_CARP:
  326                 case IPPROTO_ESP:
  327                 case IPPROTO_NONE:
  328                         ulp = ip6;
  329                         break;
  330                 case IPPROTO_TCP:
  331                         PULLUP_TO(hlen, ulp, struct tcphdr);
  332                         dport = TCP(ulp)->th_dport;
  333                         sport = TCP(ulp)->th_sport;
  334                         if (TCP(ulp)->th_flags & (TH_RST|TH_FIN))
  335                                 fibnum |= (FL_STALE << 24);
  336                         break;
  337                 case IPPROTO_SCTP:
  338                         PULLUP_TO(hlen, ulp, struct sctphdr);
   339                         sport = SCTP(ulp)->src_port;
   340                         dport = SCTP(ulp)->dest_port;
  341                         /* XXXGL: handle stale? */
  342                         break;
  343                 case IPPROTO_UDP:
  344                         PULLUP_TO(hlen, ulp, struct udphdr);
  345                         dport = UDP(ulp)->uh_dport;
  346                         sport = UDP(ulp)->uh_sport;
  347                         break;
  348                 case IPPROTO_HOPOPTS:   /* RFC 2460 */
  349                         PULLUP_TO(hlen, ulp, struct ip6_hbh);
  350                         hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
  351                         proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
  352                         ulp = NULL;
  353                         break;
  354                 case IPPROTO_ROUTING:   /* RFC 2460 */
  355                         PULLUP_TO(hlen, ulp, struct ip6_rthdr);
  356                         hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3;
  357                         proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt;
  358                         ulp = NULL;
  359                         break;
  360                 case IPPROTO_FRAGMENT:  /* RFC 2460 */
  361                         PULLUP_TO(hlen, ulp, struct ip6_frag);
  362                         hlen += sizeof (struct ip6_frag);
  363                         proto = ((struct ip6_frag *)ulp)->ip6f_nxt;
  364                         offset = ((struct ip6_frag *)ulp)->ip6f_offlg &
  365                             IP6F_OFF_MASK;
  366                         ulp = NULL;
  367                         break;
  368                 case IPPROTO_DSTOPTS:   /* RFC 2460 */
  369                         PULLUP_TO(hlen, ulp, struct ip6_hbh);
  370                         hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
  371                         proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
  372                         ulp = NULL;
  373                         break;
  374                 case IPPROTO_AH:        /* RFC 2402 */
  375                         PULLUP_TO(hlen, ulp, struct ip6_ext);
  376                         hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2;
  377                         proto = ((struct ip6_ext *)ulp)->ip6e_nxt;
  378                         ulp = NULL;
  379                         break;
  380                 default:
  381                         PULLUP_TO(hlen, ulp, struct ip6_ext);
  382                         break;
  383                 }
  384         }
  385 
  386         bcopy(&ip6->ip6_dst, &key[0], sizeof(struct in6_addr));
  387         bcopy(&ip6->ip6_src, &key[4], sizeof(struct in6_addr));
  388         key[8] = (dport << 16) | sport;
  389         fibnum |= proto << 16;
  390 
  391         fle = flowtable_lookup_common(&V_ip6_ft, key, 9 * sizeof(uint32_t),
  392             fibnum);
  393 #else   /* !FLOWTABLE_HASH_ALL */
  394         bcopy(&ip6->ip6_dst, &key[0], sizeof(struct in6_addr));
  395         fle = flowtable_lookup_common(&V_ip6_ft, key, sizeof(struct in6_addr),
  396             fibnum);
  397 #endif  /* FLOWTABLE_HASH_ALL */
  398 
  399         if (fle == NULL)
  400                 return (NULL);
  401 
  402         sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
  403         sin6->sin6_family = AF_INET6;
  404         sin6->sin6_len = sizeof(*sin6);
  405         bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(struct in6_addr));
  406 
  407         return (fle);
  408 }
  409 #endif /* INET6 */
  410 
  411 static bitstr_t *
  412 flowtable_mask(struct flowtable *ft)
  413 {
  414 
  415         /*
   416          * flowtable_free_stale() calls this without entering a critical
   417          * section, relying on sched_bind() instead.  Since the pointer is
   418          * stable throughout the ft lifetime that is safe; otherwise...
  419          *
  420          * CRITICAL_ASSERT(curthread);
  421          */
  422 
  423         return (*(bitstr_t **)zpcpu_get(ft->ft_masks));
  424 }
  425 
  426 static struct flist *
  427 flowtable_list(struct flowtable *ft, uint32_t hash)
  428 {
  429 
  430         CRITICAL_ASSERT(curthread);
  431         return (zpcpu_get(ft->ft_table[hash % ft->ft_size]));
  432 }
  433 
  434 static int
  435 flow_stale(struct flowtable *ft, struct flentry *fle, int maxidle)
  436 {
  437 
  438         if (((fle->f_rt->rt_flags & RTF_UP) == 0) ||
  439             (fle->f_rt->rt_ifp == NULL) ||
  440             !RT_LINK_IS_UP(fle->f_rt->rt_ifp) ||
  441             (fle->f_lle->la_flags & LLE_VALID) == 0)
  442                 return (1);
  443 
  444         if (time_uptime - fle->f_uptime > maxidle)
  445                 return (1);
  446 
  447 #ifdef FLOWTABLE_HASH_ALL
  448         if (fle->f_flags & FL_STALE)
  449                 return (1);
  450 #endif
  451 
  452         return (0);
  453 }
  454 
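       /*
        * The flow zone is considered full once more than 7/8 of the configured
        * maximum number of flows are allocated.
        */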
  455 static int
  456 flow_full(void)
  457 {
  458         int count, max;
  459 
  460         count = uma_zone_get_cur(flow_zone);
  461         max = uma_zone_get_max(flow_zone);
  462 
  463         return (count > (max - (max >> 3)));
  464 }
  465 
  466 static int
  467 flow_matches(struct flentry *fle, uint32_t *key, int keylen, uint32_t fibnum)
  468 {
  469 #ifdef FLOWTABLE_HASH_ALL
  470         uint8_t proto;
  471 
  472         proto = (fibnum >> 16) & 0xff;
  473         fibnum &= 0xffff;
  474 #endif
  475 
  476         CRITICAL_ASSERT(curthread);
  477 
  478         /* Microoptimization for IPv4: don't use bcmp(). */
  479         if (((keylen == sizeof(uint32_t) && (fle->f_key[0] == key[0])) ||
  480             (bcmp(fle->f_key, key, keylen) == 0)) &&
  481             fibnum == fle->f_fibnum &&
  482 #ifdef FLOWTABLE_HASH_ALL
  483             proto == fle->f_proto &&
  484 #endif
  485             (fle->f_rt->rt_flags & RTF_UP) &&
  486             fle->f_rt->rt_ifp != NULL &&
  487             (fle->f_lle->la_flags & LLE_VALID))
  488                 return (1);
  489 
  490         return (0);
  491 }
  492 
  493 static struct flentry *
  494 flowtable_insert(struct flowtable *ft, uint32_t hash, uint32_t *key,
  495     int keylen, uint32_t fibnum0)
  496 {
  497 #ifdef INET6
  498         struct route_in6 sro6;
  499 #endif
  500 #ifdef INET
  501         struct route sro;
  502 #endif
  503         struct route *ro = NULL;
  504         struct rtentry *rt;
  505         struct lltable *lt = NULL;
  506         struct llentry *lle;
  507         struct sockaddr_storage *l3addr;
  508         struct ifnet *ifp;
  509         struct flist *flist;
  510         struct flentry *fle, *iter;
  511         bitstr_t *mask;
  512         uint16_t fibnum = fibnum0;
  513 #ifdef FLOWTABLE_HASH_ALL
  514         uint8_t proto;
  515 
  516         proto = (fibnum0 >> 16) & 0xff;
  517         fibnum = fibnum0 & 0xffff;
  518 #endif
  519 
  520         /*
  521          * This bit of code ends up locking the
  522          * same route 3 times (just like ip_output + ether_output)
  523          * - at lookup
  524          * - in rt_check when called by arpresolve
  525          * - dropping the refcount for the rtentry
  526          *
  527          * This could be consolidated to one if we wrote a variant
  528          * of arpresolve with an rt_check variant that expected to
  529          * receive the route locked
  530          */
  531 #ifdef INET
  532         if (ft == &V_ip4_ft) {
  533                 struct sockaddr_in *sin;
  534 
  535                 ro = &sro;
  536                 bzero(&sro.ro_dst, sizeof(sro.ro_dst));
  537 
  538                 sin = (struct sockaddr_in *)&sro.ro_dst;
  539                 sin->sin_family = AF_INET;
  540                 sin->sin_len = sizeof(*sin);
  541                 sin->sin_addr.s_addr = key[0];
  542         }
  543 #endif
  544 #ifdef INET6
  545         if (ft == &V_ip6_ft) {
  546                 struct sockaddr_in6 *sin6;
  547 
  548                 ro = (struct route *)&sro6;
  549                 sin6 = &sro6.ro_dst;
  550 
  551                 bzero(sin6, sizeof(*sin6));
  552                 sin6->sin6_family = AF_INET6;
  553                 sin6->sin6_len = sizeof(*sin6);
  554                 bcopy(key, &sin6->sin6_addr, sizeof(struct in6_addr));
  555         }
  556 #endif
  557 
  558         ro->ro_rt = NULL;
  559 #ifdef RADIX_MPATH
  560         rtalloc_mpath_fib(ro, hash, fibnum);
  561 #else
  562         rtalloc_ign_fib(ro, 0, fibnum);
  563 #endif
  564         if (ro->ro_rt == NULL)
  565                 return (NULL);
  566 
  567         rt = ro->ro_rt;
  568         ifp = rt->rt_ifp;
  569 
  570         if (ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) {
  571                 RTFREE(rt);
  572                 return (NULL);
  573         }
  574 
  575 #ifdef INET
  576         if (ft == &V_ip4_ft)
  577                 lt = LLTABLE(ifp);
  578 #endif
  579 #ifdef INET6
  580         if (ft == &V_ip6_ft)
  581                 lt = LLTABLE6(ifp);
  582 #endif
  583 
  584         if (rt->rt_flags & RTF_GATEWAY)
  585                 l3addr = (struct sockaddr_storage *)rt->rt_gateway;
  586         else
  587                 l3addr = (struct sockaddr_storage *)&ro->ro_dst;
  588         lle = llentry_alloc(ifp, lt, l3addr);
  589 
  590         if (lle == NULL) {
  591                 RTFREE(rt);
  592                 return (NULL);
  593         }
  594 
  595         /* Don't insert the entry if the ARP hasn't yet finished resolving. */
  596         if ((lle->la_flags & LLE_VALID) == 0) {
  597                 RTFREE(rt);
  598                 LLE_FREE(lle);
  599                 FLOWSTAT_INC(ft, ft_fail_lle_invalid);
  600                 return (NULL);
  601         }
  602 
  603         fle = uma_zalloc(flow_zone, M_NOWAIT | M_ZERO);
  604         if (fle == NULL) {
  605                 RTFREE(rt);
  606                 LLE_FREE(lle);
  607                 return (NULL);
  608         }
  609 
  610         fle->f_hash = hash;
  611         bcopy(key, &fle->f_key, keylen);
  612         fle->f_rt = rt;
  613         fle->f_lle = lle;
  614         fle->f_fibnum = fibnum;
  615         fle->f_uptime = time_uptime;
  616 #ifdef FLOWTABLE_HASH_ALL
  617         fle->f_proto = proto;
  618         fle->f_flags = fibnum0 >> 24;
  619 #endif
  620 
  621         critical_enter();
  622         mask = flowtable_mask(ft);
  623         flist = flowtable_list(ft, hash);
  624 
  625         if (SLIST_EMPTY(flist)) {
  626                 bit_set(mask, (hash % ft->ft_size));
  627                 SLIST_INSERT_HEAD(flist, fle, f_next);
  628                 goto skip;
  629         }
  630 
  631         /*
  632          * find end of list and make sure that we were not
  633          * preempted by another thread handling this flow
  634          */
  635         SLIST_FOREACH(iter, flist, f_next) {
  636                 KASSERT(iter->f_hash % ft->ft_size == hash % ft->ft_size,
  637                     ("%s: wrong hash", __func__));
  638                 if (flow_matches(iter, key, keylen, fibnum)) {
  639                         /*
   640                          * We probably migrated to another CPU after
   641                          * the lookup in flowtable_lookup_common()
   642                          * failed.  It appears that this CPU already
   643                          * has the flow entry.
  644                          */
  645                         iter->f_uptime = time_uptime;
  646 #ifdef FLOWTABLE_HASH_ALL
  647                         iter->f_flags |= fibnum >> 24;
  648 #endif
  649                         critical_exit();
  650                         FLOWSTAT_INC(ft, ft_collisions);
  651                         uma_zfree(flow_zone, fle);
  652                         return (iter);
  653                 }
  654         }
  655 
  656         SLIST_INSERT_HEAD(flist, fle, f_next);
  657 skip:
  658         critical_exit();
  659         FLOWSTAT_INC(ft, ft_inserts);
  660 
  661         return (fle);
  662 }
  663 
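       /*
        * Public lookup entry point: dispatch on the address family, fill in
        * ro->ro_dst, record the flow hash as the mbuf flowid if none is set
        * yet, and hand back the cached rtentry and llentry.  RT_NORTREF tells
        * the caller that no route reference was taken on its behalf.
        */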
  664 int
  665 flowtable_lookup(sa_family_t sa, struct mbuf *m, struct route *ro)
  666 {
  667         struct flentry *fle;
  668         struct llentry *lle;
  669 
  670         if (V_flowtable_enable == 0)
  671                 return (ENXIO);
  672 
  673         switch (sa) {
  674 #ifdef INET
  675         case AF_INET:
  676                 fle = flowtable_lookup_ipv4(m, ro);
  677                 break;
  678 #endif
  679 #ifdef INET6
  680         case AF_INET6:
  681                 fle = flowtable_lookup_ipv6(m, ro);
  682                 break;
  683 #endif
  684         default:
  685                 panic("%s: sa %d", __func__, sa);
  686         }
  687 
  688         if (fle == NULL)
  689                 return (EHOSTUNREACH);
  690 
  691         if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE) {
  692                 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
  693                 m->m_pkthdr.flowid = fle->f_hash;
  694         }
  695 
  696         ro->ro_rt = fle->f_rt;
  697         ro->ro_flags |= RT_NORTREF;
  698         lle = fle->f_lle;
  699         if (lle != NULL && (lle->la_flags & LLE_VALID))
  700                 ro->ro_lle = lle;       /* share ref with fle->f_lle */
  701 
  702         return (0);
  703 }
  704 
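       /*
        * Hash the key with the boot-time jitter, scan this CPU's bucket for a
        * matching entry that is still valid, and fall back to
        * flowtable_insert() on a miss.
        */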
  705 static struct flentry *
  706 flowtable_lookup_common(struct flowtable *ft, uint32_t *key, int keylen,
  707     uint32_t fibnum)
  708 {
  709         struct flist *flist;
  710         struct flentry *fle;
  711         uint32_t hash;
  712 
  713         FLOWSTAT_INC(ft, ft_lookups);
  714 
  715         hash = jenkins_hash32(key, keylen / sizeof(uint32_t), flow_hashjitter);
  716 
  717         critical_enter();
  718         flist = flowtable_list(ft, hash);
  719         SLIST_FOREACH(fle, flist, f_next) {
  720                 KASSERT(fle->f_hash % ft->ft_size == hash % ft->ft_size,
  721                     ("%s: wrong hash", __func__));
  722                 if (flow_matches(fle, key, keylen, fibnum)) {
  723                         fle->f_uptime = time_uptime;
  724 #ifdef FLOWTABLE_HASH_ALL
  725                         fle->f_flags |= fibnum >> 24;
  726 #endif
  727                         critical_exit();
  728                         FLOWSTAT_INC(ft, ft_hits);
  729                         return (fle);
  730                 }
  731         }
  732         critical_exit();
  733 
  734         FLOWSTAT_INC(ft, ft_misses);
  735 
  736         return (flowtable_insert(ft, hash, key, keylen, fibnum));
  737 }
  738 
  739 static void
  740 flowtable_alloc(struct flowtable *ft)
  741 {
  742         int i;
  743 
  744         ft->ft_table = malloc(ft->ft_size * sizeof(struct flist),
  745             M_FTABLE, M_WAITOK);
  746         for (int i = 0; i < ft->ft_size; i++)
  747                 ft->ft_table[i] = uma_zalloc(pcpu_zone_ptr, M_WAITOK | M_ZERO);
  748 
  749         ft->ft_masks = uma_zalloc(pcpu_zone_ptr, M_WAITOK);
  750         CPU_FOREACH(i) {
  751                 bitstr_t **b;
  752 
  753                 b = zpcpu_get_cpu(ft->ft_masks, i);
  754                 *b = bit_alloc(ft->ft_size, M_FTABLE, M_WAITOK);
  755         }
  756         ft->ft_tmpmask = bit_alloc(ft->ft_size, M_FTABLE, M_WAITOK);
  757 }
  758 
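       /*
        * Scan the current CPU's bucket bitmask and, inside a critical section,
        * move stale entries (only those using rt, if one is given) onto a
        * private list; their route and llentry references are then dropped
        * outside the critical section.
        */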
  759 static void
  760 flowtable_free_stale(struct flowtable *ft, struct rtentry *rt, int maxidle)
  761 {
  762         struct flist *flist, freelist;
  763         struct flentry *fle, *fle1, *fleprev;
  764         bitstr_t *mask, *tmpmask;
  765         int curbit, tmpsize;
  766 
  767         SLIST_INIT(&freelist);
  768         mask = flowtable_mask(ft);
  769         tmpmask = ft->ft_tmpmask;
  770         tmpsize = ft->ft_size;
  771         memcpy(tmpmask, mask, ft->ft_size/8);
  772         curbit = 0;
  773         fleprev = NULL; /* pacify gcc */
  774         /*
  775          * XXX Note to self, bit_ffs operates at the byte level
  776          * and thus adds gratuitous overhead
  777          */
  778         bit_ffs(tmpmask, ft->ft_size, &curbit);
  779         while (curbit != -1) {
  780                 if (curbit >= ft->ft_size || curbit < -1) {
  781                         log(LOG_ALERT,
  782                             "warning: bad curbit value %d \n",
  783                             curbit);
  784                         break;
  785                 }
  786 
  787                 FLOWSTAT_INC(ft, ft_free_checks);
  788 
  789                 critical_enter();
  790                 flist = flowtable_list(ft, curbit);
  791 #ifdef DIAGNOSTIC
  792                 if (SLIST_EMPTY(flist) && curbit > 0) {
  793                         log(LOG_ALERT,
  794                             "warning bit=%d set, but no fle found\n",
  795                             curbit);
  796                 }
  797 #endif
  798                 SLIST_FOREACH_SAFE(fle, flist, f_next, fle1) {
  799                         if (rt != NULL && fle->f_rt != rt) {
  800                                 fleprev = fle;
  801                                 continue;
  802                         }
  803                         if (!flow_stale(ft, fle, maxidle)) {
  804                                 fleprev = fle;
  805                                 continue;
  806                         }
  807 
  808                         if (fle == SLIST_FIRST(flist))
  809                                 SLIST_REMOVE_HEAD(flist, f_next);
  810                         else
  811                                 SLIST_REMOVE_AFTER(fleprev, f_next);
  812                         SLIST_INSERT_HEAD(&freelist, fle, f_next);
  813                 }
  814                 if (SLIST_EMPTY(flist))
  815                         bit_clear(mask, curbit);
  816                 critical_exit();
  817 
  818                 bit_clear(tmpmask, curbit);
  819                 bit_ffs(tmpmask, tmpsize, &curbit);
  820         }
  821 
  822         SLIST_FOREACH_SAFE(fle, &freelist, f_next, fle1) {
  823                 FLOWSTAT_INC(ft, ft_frees);
  824                 if (fle->f_rt != NULL)
  825                         RTFREE(fle->f_rt);
  826                 if (fle->f_lle != NULL)
  827                         LLE_FREE(fle->f_lle);
  828                 uma_zfree(flow_zone, fle);
  829         }
  830 }
  831 
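       /*
        * Run flowtable_free_stale() once per CPU, binding the cleaner thread
        * to each CPU in turn so that a per-CPU list is only ever modified by
        * the CPU that owns it.
        */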
  832 static void
  833 flowtable_clean_vnet(struct flowtable *ft, struct rtentry *rt, int maxidle)
  834 {
  835         int i;
  836 
  837         CPU_FOREACH(i) {
  838                 if (smp_started == 1) {
  839                         thread_lock(curthread);
  840                         sched_bind(curthread, i);
  841                         thread_unlock(curthread);
  842                 }
  843 
  844                 flowtable_free_stale(ft, rt, maxidle);
  845 
  846                 if (smp_started == 1) {
  847                         thread_lock(curthread);
  848                         sched_unbind(curthread);
  849                         thread_unlock(curthread);
  850                 }
  851         }
  852 }
  853 
  854 void
  855 flowtable_route_flush(sa_family_t sa, struct rtentry *rt)
  856 {
  857         struct flowtable *ft;
  858 
  859         switch (sa) {
  860 #ifdef INET
  861         case AF_INET:
  862                 ft = &V_ip4_ft;
  863                 break;
  864 #endif
  865 #ifdef INET6
  866         case AF_INET6:
  867                 ft = &V_ip6_ft;
  868                 break;
  869 #endif
  870         default:
  871                 panic("%s: sa %d", __func__, sa);
  872         }
  873 
  874         flowtable_clean_vnet(ft, rt, 0);
  875 }
  876 
  877 static void
  878 flowtable_cleaner(void)
  879 {
  880         VNET_ITERATOR_DECL(vnet_iter);
  881         struct thread *td;
  882 
  883         if (bootverbose)
  884                 log(LOG_INFO, "flowtable cleaner started\n");
  885         td = curthread;
  886         while (1) {
  887                 uint32_t flowclean_freq, maxidle;
  888 
  889                 /*
   890                  * The maximum idle time, as well as the frequency, is arbitrary.
  891                  */
  892                 if (flow_full())
  893                         maxidle = 5;
  894                 else
  895                         maxidle = 30;
  896 
  897                 VNET_LIST_RLOCK();
  898                 VNET_FOREACH(vnet_iter) {
  899                         CURVNET_SET(vnet_iter);
  900 #ifdef INET
  901                         flowtable_clean_vnet(&V_ip4_ft, NULL, maxidle);
  902 #endif
  903 #ifdef INET6
  904                         flowtable_clean_vnet(&V_ip6_ft, NULL, maxidle);
  905 #endif
  906                         CURVNET_RESTORE();
  907                 }
  908                 VNET_LIST_RUNLOCK();
  909 
  910                 if (flow_full())
  911                         flowclean_freq = 4*hz;
  912                 else
  913                         flowclean_freq = 20*hz;
  914                 mtx_lock(&flowclean_lock);
  915                 thread_lock(td);
  916                 sched_prio(td, PPAUSE);
  917                 thread_unlock(td);
  918                 flowclean_cycles++;
  919                 cv_broadcast(&flowclean_f_cv);
  920                 cv_timedwait(&flowclean_c_cv, &flowclean_lock, flowclean_freq);
  921                 mtx_unlock(&flowclean_lock);
  922         }
  923 }
  924 
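       /*
        * Registered for ifnet departure events: wake the cleaner and block
        * until it has completed a full cleaning cycle.
        */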
  925 static void
  926 flowtable_flush(void *unused __unused)
  927 {
  928         uint64_t start;
  929 
  930         mtx_lock(&flowclean_lock);
  931         start = flowclean_cycles;
  932         while (start == flowclean_cycles) {
  933                 cv_broadcast(&flowclean_c_cv);
  934                 cv_wait(&flowclean_f_cv, &flowclean_lock);
  935         }
  936         mtx_unlock(&flowclean_lock);
  937 }
  938 
  939 static struct kproc_desc flow_kp = {
  940         "flowcleaner",
  941         flowtable_cleaner,
  942         &flowcleanerproc
  943 };
  944 SYSINIT(flowcleaner, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &flow_kp);
  945 
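       /*
        * The table size comes from the named tunable (clamped to at least 256
        * and forced to 2048 if not a power of two), or defaults to the next
        * power of two no smaller than 1024 + maxusers * 64.
        */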
  946 static int
  947 flowtable_get_size(char *name)
  948 {
  949         int size;
  950 
  951         if (TUNABLE_INT_FETCH(name, &size)) {
  952                 if (size < 256)
  953                         size = 256;
  954                 if (!powerof2(size)) {
  955                         printf("%s must be power of 2\n", name);
  956                         size = 2048;
  957                 }
  958         } else {
  959                 /*
  960                  * round up to the next power of 2
  961                  */
  962                 size = 1 << fls((1024 + maxusers * 64) - 1);
  963         }
  964 
  965         return (size);
  966 }
  967 
  968 static void
  969 flowtable_init(const void *unused __unused)
  970 {
  971 
  972         flow_hashjitter = arc4random();
  973 
  974         flow_zone = uma_zcreate("flows", sizeof(struct flentry),
  975             NULL, NULL, NULL, NULL, (64-1), UMA_ZONE_MAXBUCKET);
  976         uma_zone_set_max(flow_zone, 1024 + maxusers * 64 * mp_ncpus);
  977 
  978         cv_init(&flowclean_c_cv, "c_flowcleanwait");
  979         cv_init(&flowclean_f_cv, "f_flowcleanwait");
  980         mtx_init(&flowclean_lock, "flowclean lock", NULL, MTX_DEF);
  981         EVENTHANDLER_REGISTER(ifnet_departure_event, flowtable_flush, NULL,
  982             EVENTHANDLER_PRI_ANY);
  983 }
  984 SYSINIT(flowtable_init, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST,
  985     flowtable_init, NULL);
  986 
  987 #ifdef INET
  988 static SYSCTL_NODE(_net_flowtable, OID_AUTO, ip4, CTLFLAG_RD, NULL,
  989     "Flowtable for IPv4");
  990 
  991 static VNET_PCPUSTAT_DEFINE(struct flowtable_stat, ip4_ftstat);
  992 VNET_PCPUSTAT_SYSINIT(ip4_ftstat);
  993 VNET_PCPUSTAT_SYSUNINIT(ip4_ftstat);
  994 SYSCTL_VNET_PCPUSTAT(_net_flowtable_ip4, OID_AUTO, stat, struct flowtable_stat,
  995     ip4_ftstat, "Flowtable statistics for IPv4 "
  996     "(struct flowtable_stat, net/flowtable.h)");
  997 
  998 static void
  999 flowtable_init_vnet_v4(const void *unused __unused)
 1000 {
 1001 
 1002         V_ip4_ft.ft_size = flowtable_get_size("net.flowtable.ip4.size");
 1003         V_ip4_ft.ft_stat = VNET(ip4_ftstat);
 1004         flowtable_alloc(&V_ip4_ft);
 1005 }
 1006 VNET_SYSINIT(ft_vnet_v4, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
 1007     flowtable_init_vnet_v4, NULL);
 1008 #endif /* INET */
 1009 
 1010 #ifdef INET6
 1011 static SYSCTL_NODE(_net_flowtable, OID_AUTO, ip6, CTLFLAG_RD, NULL,
 1012     "Flowtable for IPv6");
 1013 
 1014 static VNET_PCPUSTAT_DEFINE(struct flowtable_stat, ip6_ftstat);
 1015 VNET_PCPUSTAT_SYSINIT(ip6_ftstat);
 1016 VNET_PCPUSTAT_SYSUNINIT(ip6_ftstat);
 1017 SYSCTL_VNET_PCPUSTAT(_net_flowtable_ip6, OID_AUTO, stat, struct flowtable_stat,
 1018     ip6_ftstat, "Flowtable statistics for IPv6 "
 1019     "(struct flowtable_stat, net/flowtable.h)");
 1020 
 1021 static void
 1022 flowtable_init_vnet_v6(const void *unused __unused)
 1023 {
 1024 
 1025         V_ip6_ft.ft_size = flowtable_get_size("net.flowtable.ip6.size");
 1026         V_ip6_ft.ft_stat = VNET(ip6_ftstat);
 1027         flowtable_alloc(&V_ip6_ft);
 1028 }
 1029 VNET_SYSINIT(flowtable_init_vnet_v6, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
 1030     flowtable_init_vnet_v6, NULL);
 1031 #endif /* INET6 */
 1032 
 1033 #ifdef DDB
 1034 static bitstr_t *
 1035 flowtable_mask_pcpu(struct flowtable *ft, int cpuid)
 1036 {
 1037 
 1038         return (zpcpu_get_cpu(*ft->ft_masks, cpuid));
 1039 }
 1040 
 1041 static struct flist *
 1042 flowtable_list_pcpu(struct flowtable *ft, uint32_t hash, int cpuid)
 1043 {
 1044 
 1045         return (zpcpu_get_cpu(&ft->ft_table[hash % ft->ft_size], cpuid));
 1046 }
 1047 
 1048 static void
 1049 flow_show(struct flowtable *ft, struct flentry *fle)
 1050 {
 1051         int idle_time;
 1052         int rt_valid, ifp_valid;
 1053         volatile struct rtentry *rt;
 1054         struct ifnet *ifp = NULL;
 1055         uint32_t *hashkey = fle->f_key;
 1056 
 1057         idle_time = (int)(time_uptime - fle->f_uptime);
 1058         rt = fle->f_rt;
 1059         rt_valid = rt != NULL;
 1060         if (rt_valid)
 1061                 ifp = rt->rt_ifp;
 1062         ifp_valid = ifp != NULL;
 1063 
 1064 #ifdef INET
 1065         if (ft == &V_ip4_ft) {
 1066                 char daddr[4*sizeof "123"];
 1067 #ifdef FLOWTABLE_HASH_ALL
 1068                 char saddr[4*sizeof "123"];
 1069                 uint16_t sport, dport;
 1070 #endif
 1071 
 1072                 inet_ntoa_r(*(struct in_addr *) &hashkey[0], daddr);
 1073 #ifdef FLOWTABLE_HASH_ALL
 1074                 inet_ntoa_r(*(struct in_addr *) &hashkey[1], saddr);
 1075                 dport = ntohs((uint16_t)(hashkey[2] >> 16));
 1076                 sport = ntohs((uint16_t)(hashkey[2] & 0xffff));
 1077                 db_printf("%s:%d->%s:%d", saddr, sport, daddr, dport);
 1078 #else
 1079                 db_printf("%s ", daddr);
 1080 #endif
 1081         }
 1082 #endif /* INET */
 1083 #ifdef INET6
 1084         if (ft == &V_ip6_ft) {
 1085 #ifdef FLOWTABLE_HASH_ALL
 1086                 db_printf("\n\tkey=%08x:%08x:%08x%08x:%08x:%08x%08x:%08x:%08x",
 1087                     hashkey[0], hashkey[1], hashkey[2],
 1088                     hashkey[3], hashkey[4], hashkey[5],
 1089                     hashkey[6], hashkey[7], hashkey[8]);
 1090 #else
 1091                 db_printf("\n\tkey=%08x:%08x:%08x ",
 1092                     hashkey[0], hashkey[1], hashkey[2]);
 1093 #endif
 1094         }
 1095 #endif /* INET6 */
 1096 
 1097         db_printf("hash=%08x idle_time=%03d"
 1098             "\n\tfibnum=%02d rt=%p",
 1099             fle->f_hash, idle_time, fle->f_fibnum, fle->f_rt);
 1100 
 1101 #ifdef FLOWTABLE_HASH_ALL
 1102         if (fle->f_flags & FL_STALE)
 1103                 db_printf(" FL_STALE ");
 1104 #endif
 1105         if (rt_valid) {
 1106                 if (rt->rt_flags & RTF_UP)
 1107                         db_printf(" RTF_UP ");
 1108         }
 1109         if (ifp_valid) {
 1110                 if (ifp->if_flags & IFF_LOOPBACK)
 1111                         db_printf(" IFF_LOOPBACK ");
 1112                 if (ifp->if_flags & IFF_UP)
 1113                         db_printf(" IFF_UP ");
 1114                 if (ifp->if_flags & IFF_POINTOPOINT)
 1115                         db_printf(" IFF_POINTOPOINT ");
 1116         }
 1117         db_printf("\n");
 1118 }
 1119 
 1120 static void
 1121 flowtable_show(struct flowtable *ft, int cpuid)
 1122 {
 1123         int curbit = 0;
 1124         bitstr_t *mask, *tmpmask;
 1125 
 1126         if (cpuid != -1)
 1127                 db_printf("cpu: %d\n", cpuid);
 1128         mask = flowtable_mask_pcpu(ft, cpuid);
 1129         tmpmask = ft->ft_tmpmask;
 1130         memcpy(tmpmask, mask, ft->ft_size/8);
 1131         /*
 1132          * XXX Note to self, bit_ffs operates at the byte level
 1133          * and thus adds gratuitous overhead
 1134          */
 1135         bit_ffs(tmpmask, ft->ft_size, &curbit);
 1136         while (curbit != -1) {
 1137                 struct flist *flist;
 1138                 struct flentry *fle;
 1139 
 1140                 if (curbit >= ft->ft_size || curbit < -1) {
 1141                         db_printf("warning: bad curbit value %d \n",
 1142                             curbit);
 1143                         break;
 1144                 }
 1145 
 1146                 flist = flowtable_list_pcpu(ft, curbit, cpuid);
 1147 
 1148                 SLIST_FOREACH(fle, flist, f_next)
 1149                         flow_show(ft, fle);
 1150                 bit_clear(tmpmask, curbit);
 1151                 bit_ffs(tmpmask, ft->ft_size, &curbit);
 1152         }
 1153 }
 1154 
 1155 static void
 1156 flowtable_show_vnet(struct flowtable *ft)
 1157 {
 1158 
 1159         int i;
 1160 
 1161         CPU_FOREACH(i)
 1162                 flowtable_show(ft, i);
 1163 }
 1164 
 1165 DB_SHOW_COMMAND(flowtables, db_show_flowtables)
 1166 {
 1167         VNET_ITERATOR_DECL(vnet_iter);
 1168 
 1169         VNET_FOREACH(vnet_iter) {
 1170                 CURVNET_SET(vnet_iter);
 1171 #ifdef VIMAGE
 1172                 db_printf("vnet %p\n", vnet_iter);
 1173 #endif
 1174 #ifdef INET
 1175                 printf("IPv4:\n");
 1176                 flowtable_show_vnet(&V_ip4_ft);
 1177 #endif
 1178 #ifdef INET6
 1179                 printf("IPv6:\n");
 1180                 flowtable_show_vnet(&V_ip6_ft);
 1181 #endif
 1182                 CURVNET_RESTORE();
 1183         }
 1184 }
 1185 #endif
