FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_l2t.c

/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/dev/cxgb/cxgb_l2t.c 171884 2007-08-18 09:10:26Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#if __FreeBSD_version > 700000
#include <sys/rwlock.h>
#endif

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif

#define VLAN_NONE 0xfff
#define SDL(s) ((struct sockaddr_dl *)s)
#define RT_ENADDR(rt)  ((u_char *)LLADDR(SDL((rt))))
#define rt_expire rt_rmx.rmx_expire

struct llinfo_arp {
        struct  callout la_timer;
        struct  rtentry *la_rt;
        struct  mbuf *la_hold;  /* last packet until resolved/timeout */
        u_short la_preempt;     /* countdown for pre-expiry arps */
        u_short la_asked;       /* # requests sent */
};

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a mutex per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's mutex.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers
 * so no other lookups can happen while allocating new entries.  Entry
 * updates take the table lock as readers so multiple entries can be
 * updated in parallel.  An L2T entry can be dropped by decrementing its
 * reference count, which can happen in parallel with entry allocation,
 * but no entry can change state or increment its ref count during
 * allocation, as both of these perform lookups.
 */
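
/*
 * Lock-order sketch implied by the notes above (the entry locks are the
 * MTX_DEF mutexes initialized in t3_init_l2t()):
 *
 *   rw_wlock(&d->lock)          writers: t3_l2t_get() -> alloc_l2e()
 *   rw_rlock(&d->lock)          readers: t3_l2t_update()
 *       mtx_lock(&e->lock)      always acquired inside the table lock
 *
 * t3_l2e_free() takes an entry mutex without the table lock; that is
 * consistent with this order, which only forbids taking the table lock
 * while an entry lock is held.
 */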

static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
        return e->vlan >> 13;
}

static inline unsigned int
arp_hash(u32 key, int ifindex, const struct l2t_data *d)
{
        return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}
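
/*
 * e->vlan holds an 802.1Q tag control word: the top 3 bits are the
 * priority extracted by vlan_prio() and the low 12 bits are the VLAN id
 * (EVL_VLID_MASK).  For example, a tag of 0x6005 gives priority 3 and
 * VLAN 5.  arp_hash() masks a Jenkins hash with nentries - 1, so it
 * assumes d->nentries is a power of 2.
 */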

static inline void
neigh_replace(struct l2t_entry *e, struct rtentry *rt)
{
        RT_LOCK(rt);
        RT_ADDREF(rt);
        RT_UNLOCK(rt);

        if (e->neigh) {
                RT_LOCK(e->neigh);
                RT_REMREF(e->neigh);
                RT_UNLOCK(e->neigh);
        }
        e->neigh = rt;
}
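
/*
 * neigh_replace() takes the reference on the new route before dropping
 * the one on the old route, so it behaves correctly even if rt and
 * e->neigh are the same rtentry.
 */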

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied mbuf is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int
setup_l2e_send_pending(struct toedev *dev, struct mbuf *m,
                        struct l2t_entry *e)
{
        struct cpl_l2t_write_req *req;

        if (!m) {
                if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return (ENOMEM);
        }
        /*
         * XXX MH_ALIGN
         */
        req = mtod(m, struct cpl_l2t_write_req *);
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
        req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
                            V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
                            V_L2T_W_PRIO(vlan_prio(e)));

        memcpy(e->dmac, RT_ENADDR(e->neigh), sizeof(e->dmac));
        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
        m_set_priority(m, CPL_PRIORITY_CONTROL);
        cxgb_ofld_send(dev, m);
        while (e->arpq_head) {
                m = e->arpq_head;
                e->arpq_head = m->m_next;
                m->m_next = NULL;
                cxgb_ofld_send(dev, m);
        }
        e->arpq_tail = NULL;
        e->state = L2T_STATE_VALID;

        return (0);
}
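
/*
 * The CPL_L2T_WRITE_REQ built above pushes the entry index, SMT index,
 * VLAN id/priority and destination MAC into the hardware L2 table; once
 * it is on the control queue, the packets parked on the arpq can be
 * drained in FIFO order and the entry declared VALID.
 */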

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
        m->m_next = NULL;
        if (e->arpq_head)
                e->arpq_tail->m_next = m;
        else
                e->arpq_head = m;
        e->arpq_tail = m;
}
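
/*
 * The arpq is a singly-linked mbuf chain threaded through m_next; the
 * head/tail pair makes the append above O(1) and lets
 * setup_l2e_send_pending() drain the queue in arrival order.
 */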

int
t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
                     struct l2t_entry *e)
{
        struct rtentry *rt;
        struct mbuf *m0;

        if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                return (ENOMEM);

        rt = e->neigh;

again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
                mtx_lock(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                mtx_unlock(&e->lock);
                /* FALLTHROUGH */
        case L2T_STATE_VALID:     /* fast-path, send the packet on */
                return (cxgb_ofld_send(dev, m));
        case L2T_STATE_RESOLVING:
                mtx_lock(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed */
                        mtx_unlock(&e->lock);
                        goto again;
                }
                arpq_enqueue(e, m);
                mtx_unlock(&e->lock);

                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return (ENOMEM);
                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution.  However, because the m_gethdr above can fail,
                 * we allow each packet added to the arpq to retry resolution
                 * as a way of recovering from transient memory exhaustion.
                 * A better way would be to use a work request to retry L2T
                 * entries when there's no memory.
                 */
                if (arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway,
                    RT_ENADDR(rt)) == 0) {
                        mtx_lock(&e->lock);
                        if (e->arpq_head)
                                setup_l2e_send_pending(dev, m, e);
                        else
                                m_freem(m);
                        mtx_unlock(&e->lock);
                }
        }
        return (0);
}
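
/*
 * L2T entry state machine, as driven by the two send paths and
 * t3_l2t_update():
 *
 *   VALID     - hardware entry programmed; packets go straight through.
 *   STALE     - still usable, but revalidation is kicked off on first
 *               use and the state flips back to VALID.
 *   RESOLVING - no usable MAC yet; packets wait on the arpq until the
 *               entry is written via setup_l2e_send_pending().
 */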

void
t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e)
{
        struct rtentry *rt;
        struct mbuf *m0;

        if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                return;

        rt = e->neigh;
again:
        switch (e->state) {
        case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
                mtx_lock(&e->lock);
                if (e->state == L2T_STATE_STALE)
                        e->state = L2T_STATE_VALID;
                mtx_unlock(&e->lock);
                return;
        case L2T_STATE_VALID:     /* fast path, nothing to do */
                return;
        case L2T_STATE_RESOLVING:
                mtx_lock(&e->lock);
                if (e->state != L2T_STATE_RESOLVING) {
                        /* ARP already completed */
                        mtx_unlock(&e->lock);
                        goto again;
                }
                mtx_unlock(&e->lock);

                if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
                        return;
                /*
                 * Only the first packet added to the arpq should kick off
                 * resolution.  However, because the m_gethdr above can fail,
                 * we allow each packet added to the arpq to retry resolution
                 * as a way of recovering from transient memory exhaustion.
                 * A better way would be to use a work request to retry L2T
                 * entries when there's no memory.
                 */
                arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
        }
        return;
}
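
/*
 * Unlike t3_l2t_send_slow(), t3_l2t_send_event() carries no payload: it
 * only prods the state machine, so the RESOLVING case re-arms ARP
 * resolution without queueing anything on the arpq.
 */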

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
        struct l2t_entry *end, *e, **p;

        if (!atomic_load_acq_int(&d->nfree))
                return (NULL);

        /* there's definitely a free entry */
        for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
                if (atomic_load_acq_int(&e->refcnt) == 0)
                        goto found;

        for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e)
                ;
found:
        d->rover = e + 1;
        atomic_add_int(&d->nfree, -1);

        /*
         * The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state != L2T_STATE_UNUSED) {
                int hash = arp_hash(e->addr, e->ifindex, d);

                for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
                        if (*p == e) {
                                *p = e->next;
                                break;
                        }
                e->state = L2T_STATE_UNUSED;
        }
        return (e);
}
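
/*
 * The two loops above implement a round-robin allocator: scan forward
 * from the rover (the entry after the previous allocation), then wrap
 * to l2tab[1] (entry 0 is reserved; see t3_init_l2t()).  The nfree
 * check guarantees the wrapped scan terminates on a free entry.
 */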

/*
 * Called when an L2T entry has no more users.  The entry is left in the
 * hash table since it is likely to be reused, but we also bump nfree to
 * indicate that the entry can be reallocated for a different neighbor.
 * We also drop the existing neighbor reference in case the neighbor is
 * going away and is waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref
 * count drops to 0, we need to take the entry's lock to avoid races with
 * a new incarnation.
 */
void
t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
        mtx_lock(&e->lock);
        if (atomic_load_acq_int(&e->refcnt) == 0) {  /* hasn't been recycled */
                if (e->neigh) {
                        RT_LOCK(e->neigh);
                        RT_REMREF(e->neigh);
                        RT_UNLOCK(e->neigh);
                        e->neigh = NULL;
                }
        }
        mtx_unlock(&e->lock);
        atomic_add_int(&d->nfree, 1);
}

/*
 * Update an L2T entry that was previously used for the same next hop as
 * neigh.  Must be called with the L2 table lock held.
 */
static inline void
reuse_entry(struct l2t_entry *e, struct rtentry *neigh)
{
        struct llinfo_arp *la;

        la = (struct llinfo_arp *)neigh->rt_llinfo;

        mtx_lock(&e->lock);                /* avoid race with t3_l2e_free */
        if (neigh != e->neigh)
                neigh_replace(e, neigh);

        if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)) ||
            (neigh->rt_expire > time_uptime))
                e->state = L2T_STATE_RESOLVING;
        else if (la->la_hold == NULL)
                e->state = L2T_STATE_VALID;
        else
                e->state = L2T_STATE_STALE;
        mtx_unlock(&e->lock);
}

struct l2t_entry *
t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
                             unsigned int smt_idx)
{
        struct l2t_entry *e;
        struct l2t_data *d = L2DATA(dev);
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->rt_ifp->if_index;
        int hash = arp_hash(addr, ifidx, d);

        rw_wlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx &&
                    e->smt_idx == smt_idx) {
                        l2t_hold(d, e);
                        if (atomic_load_acq_int(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
                        goto done;
                }

        /* Need to allocate a new entry */
        e = alloc_l2e(d);
        if (e) {
                mtx_lock(&e->lock);          /* avoid race with t3_l2e_free */
                e->next = d->l2tab[hash].first;
                d->l2tab[hash].first = e;
                e->state = L2T_STATE_RESOLVING;
                e->addr = addr;
                e->ifindex = ifidx;
                e->smt_idx = smt_idx;
                atomic_store_rel_int(&e->refcnt, 1);
                neigh_replace(e, neigh);
#ifdef notyet
                /*
                 * XXX need to add accessor function for vlan tag
                 */
                if (neigh->rt_ifp->if_vlantrunk)
                        e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
                else
#endif
                        e->vlan = VLAN_NONE;
                mtx_unlock(&e->lock);
        }
done:
        rw_wunlock(&d->lock);
        return (e);
}
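
/*
 * A successful t3_l2t_get() returns the entry with a reference held for
 * the caller: either a fresh entry created with refcnt 1 or an existing
 * one bumped with l2t_hold().  A hit whose refcnt just became 1 may have
 * served a different neighbor in the meantime, hence the reuse_entry()
 * revalidation.
 */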

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the TOE.
 *
 * XXX: maybe we should abandon the latter behavior and just require a
 * failure handler.
 */
static void
handle_failed_resolution(struct toedev *dev, struct mbuf *arpq)
{

        while (arpq) {
                struct mbuf *m = arpq;
#ifdef notyet
                struct l2t_mbuf_cb *cb = L2T_MBUF_CB(m);
#endif
                arpq = m->m_next;
                m->m_next = NULL;
#ifdef notyet
                if (cb->arp_failure_handler)
                        cb->arp_failure_handler(dev, m);
                else
#endif
                        cxgb_ofld_send(dev, m);
        }
}

#if defined(NETEVENT) || !defined(CONFIG_CHELSIO_T3_MODULE)
/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
        struct l2t_entry *e;
        struct mbuf *arpq = NULL;
        struct l2t_data *d = L2DATA(dev);
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->rt_ifp->if_index;
        int hash = arp_hash(addr, ifidx, d);
        struct llinfo_arp *la;

        rw_rlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx) {
                        mtx_lock(&e->lock);
                        goto found;
                }
        rw_runlock(&d->lock);
        return;

found:
        rw_runlock(&d->lock);
        if (atomic_load_acq_int(&e->refcnt)) {
                if (neigh != e->neigh)
                        neigh_replace(e, neigh);

                la = (struct llinfo_arp *)neigh->rt_llinfo;
                if (e->state == L2T_STATE_RESOLVING) {
                        if (la->la_asked >= 5 /* arp_maxtries */) {
                                arpq = e->arpq_head;
                                e->arpq_head = e->arpq_tail = NULL;
                        } else if (la->la_hold == NULL)
                                setup_l2e_send_pending(dev, NULL, e);
                } else {
                        e->state = (la->la_hold == NULL) ?
                                L2T_STATE_VALID : L2T_STATE_STALE;
                        if (memcmp(e->dmac, RT_ENADDR(neigh), 6))
                                setup_l2e_send_pending(dev, NULL, e);
                }
        }
        mtx_unlock(&e->lock);

        if (arpq)
                handle_failed_resolution(dev, arpq);
}
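
/*
 * If ARP has given up (la_asked has hit the retry limit), the pending
 * arpq is detached while e->lock is held and the orphaned chain is then
 * handed to handle_failed_resolution() after the lock is dropped.
 */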
#else
/*
 * Called from a kprobe, interrupts are off.
 */
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
        struct l2t_entry *e;
        struct l2t_data *d = L2DATA(dev);
        u32 addr = *(u32 *) rt_key(neigh);
        int ifidx = neigh->dev->ifindex;
        int hash = arp_hash(addr, ifidx, d);

        rw_rlock(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (e->addr == addr && e->ifindex == ifidx) {
                        mtx_lock(&e->lock);
                        if (atomic_load_acq_int(&e->refcnt)) {
                                if (neigh != e->neigh)
                                        neigh_replace(e, neigh);
                                e->tdev = dev;
                                mod_timer(&e->update_timer, jiffies + 1);
                        }
                        mtx_unlock(&e->lock);
                        break;
                }
        rw_runlock(&d->lock);
}

static void
update_timer_cb(unsigned long data)
{
        struct mbuf *arpq = NULL;
        struct l2t_entry *e = (struct l2t_entry *)data;
        struct rtentry *neigh = e->neigh;
        struct toedev *dev = e->tdev;

        barrier();
        if (!atomic_load_acq_int(&e->refcnt))
                return;

        rw_rlock(&neigh->lock);
        mtx_lock(&e->lock);

        if (atomic_load_acq_int(&e->refcnt)) {
                if (e->state == L2T_STATE_RESOLVING) {
                        if (neigh->nud_state & NUD_FAILED) {
                                arpq = e->arpq_head;
                                e->arpq_head = e->arpq_tail = NULL;
                        } else if (neigh_is_connected(neigh) && e->arpq_head)
                                setup_l2e_send_pending(dev, NULL, e);
                } else {
                        e->state = neigh_is_connected(neigh) ?
                                L2T_STATE_VALID : L2T_STATE_STALE;
                        if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)))
                                setup_l2e_send_pending(dev, NULL, e);
                }
        }
        mtx_unlock(&e->lock);
        rw_runlock(&neigh->lock);

        if (arpq)
                handle_failed_resolution(dev, arpq);
}
#endif

struct l2t_data *
t3_init_l2t(unsigned int l2t_capacity)
{
        struct l2t_data *d;
        int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

        d = cxgb_alloc_mem(size);
        if (!d)
                return (NULL);

        d->nentries = l2t_capacity;
        d->rover = &d->l2tab[1];        /* entry 0 is not used */
        atomic_store_rel_int(&d->nfree, l2t_capacity - 1);
        rw_init(&d->lock, "L2T");

        for (i = 0; i < l2t_capacity; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                mtx_init(&d->l2tab[i].lock, "L2TAB", NULL, MTX_DEF);
                atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
                setup_timer(&d->l2tab[i].update_timer, update_timer_cb,
                            (unsigned long)&d->l2tab[i]);
#endif
#endif
        }
        return (d);
}
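
/*
 * The header and the entry array are carved out of one allocation:
 * sizeof(*d) covers struct l2t_data and the l2tab[] entries follow it
 * in the same block, which is why a single cxgb_free_mem() suffices in
 * t3_free_l2t().
 */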

void
t3_free_l2t(struct l2t_data *d)
{
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
        int i;

        /* Stop all L2T timers */
        for (i = 0; i < d->nentries; ++i)
                del_timer_sync(&d->l2tab[i].update_timer);
#endif
#endif
        cxgb_free_mem(d);
}

#ifdef CONFIG_PROC_FS
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static inline void *
l2t_get_idx(struct seq_file *seq, loff_t pos)
{
        struct l2t_data *d = seq->private;

        return pos >= d->nentries ? NULL : &d->l2tab[pos];
}

static void *
l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN;
}

static void *
l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        v = l2t_get_idx(seq, *pos + 1);
        if (v)
                ++*pos;
        return v;
}

static void
l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char
l2e_state(const struct l2t_entry *e)
{
        switch (e->state) {
        case L2T_STATE_VALID: return 'V';  /* valid, fast-path entry */
        case L2T_STATE_STALE: return 'S';  /* needs revalidation, but usable */
        case L2T_STATE_RESOLVING:
                return e->arpq_head ? 'A' : 'R';
        default:
                return 'U';
        }
}

static int
l2t_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Index IP address      Ethernet address   VLAN  "
                         "Prio  State   Users SMTIDX  Port\n");
        else {
                char ip[20];
                struct l2t_entry *e = v;

                mtx_lock(&e->lock);
                sprintf(ip, "%u.%u.%u.%u", NIPQUAD(e->addr));
                seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x  %4d"
                           "  %3u     %c   %7u   %4u %s\n",
                           e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
                           e->dmac[3], e->dmac[4], e->dmac[5],
                           e->vlan & EVL_VLID_MASK, vlan_prio(e),
                           l2e_state(e), atomic_load_acq_int(&e->refcnt), e->smt_idx,
                           e->neigh ? e->neigh->dev->name : "");
                mtx_unlock(&e->lock);
        }
        return 0;
}

#endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.