FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_offload.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2008, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15 
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 
   29 ***************************************************************************/
   30 
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/7.4/sys/dev/cxgb/cxgb_offload.c 194042 2009-06-11 22:00:54Z gnn $");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/bus.h>
   39 #include <sys/module.h>
   40 #include <sys/pciio.h>
   41 #include <sys/conf.h>
   42 #include <machine/bus.h>
   43 #include <machine/resource.h>
   44 #include <sys/bus_dma.h>
   45 #include <sys/rman.h>
   46 #include <sys/ioccom.h>
   47 #include <sys/mbuf.h>
   48 #include <sys/linker.h>
   49 #include <sys/firmware.h>
   50 #include <sys/socket.h>
   51 #include <sys/sockio.h>
   52 #include <sys/smp.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/syslog.h>
   55 #include <sys/queue.h>
   56 #include <sys/taskqueue.h>
   57 #include <sys/proc.h>
   58 
   59 #ifdef CONFIG_DEFINED
   60 #include <cxgb_include.h>
   61 #else
   62 #include <dev/cxgb/cxgb_include.h>
   63 #endif
   64 
   65 #include <net/route.h>
   66 
   67 #define VALIDATE_TID 0
   68 MALLOC_DEFINE(M_CXGB, "cxgb", "Chelsio 10 Gigabit Ethernet and services");
   69 
   70 TAILQ_HEAD(, cxgb_client) client_list;
   71 TAILQ_HEAD(, t3cdev) ofld_dev_list;
   72 
   73 
   74 static struct mtx cxgb_db_lock;
   75 
   76 
   77 static int inited = 0;
   78 
   79 static inline int
   80 offload_activated(struct t3cdev *tdev)
   81 {
   82         struct adapter *adapter = tdev2adap(tdev);
   83         
   84         return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
   85 }
   86 
   87 static inline void
   88 register_tdev(struct t3cdev *tdev)
   89 {
   90         static int unit;
   91 
   92         mtx_lock(&cxgb_db_lock);
   93         snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
   94         TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, entry);
   95         mtx_unlock(&cxgb_db_lock);
   96 }
   97 
   98 static inline void
   99 unregister_tdev(struct t3cdev *tdev)
  100 {
  101         if (!inited)
  102                 return;
  103 
  104         mtx_lock(&cxgb_db_lock);
  105         TAILQ_REMOVE(&ofld_dev_list, tdev, entry);
  106         mtx_unlock(&cxgb_db_lock);      
  107 }
  108 
  109 #ifndef TCP_OFFLOAD_DISABLE
  110 /**
  111  *      cxgb_register_client - register an offload client
  112  *      @client: the client
  113  *
   114  *      Add the client to the client list and call back the client
   115  *      for each activated offload device.
  116  */
  117 void
  118 cxgb_register_client(struct cxgb_client *client)
  119 {
  120         struct t3cdev *tdev;
  121 
  122         mtx_lock(&cxgb_db_lock);
  123         TAILQ_INSERT_TAIL(&client_list, client, client_entry);
  124 
  125         if (client->add) {
  126                 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
  127                         if (offload_activated(tdev)) {
  128                                 client->add(tdev);
  129                         } else
  130                                 CTR1(KTR_CXGB,
  131                                     "cxgb_register_client: %p not activated", tdev);
  132                         
  133                 }
  134         }
  135         mtx_unlock(&cxgb_db_lock);
  136 }
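/*
 * Illustrative sketch, not part of the driver: a minimal offload client
 * built only from the cxgb_client members this file uses (add, remove,
 * client_entry).  The function names and log messages are hypothetical.
 * Registering the client calls its add() method for every offload device
 * already on ofld_dev_list that is activated.
 */
#if 0
static void
example_client_add(struct t3cdev *tdev)
{
        log(LOG_INFO, "example client: attached to %s\n", tdev->name);
}

static void
example_client_remove(struct t3cdev *tdev)
{
        log(LOG_INFO, "example client: detached from %s\n", tdev->name);
}

static struct cxgb_client example_client = {
        .add    = example_client_add,
        .remove = example_client_remove,
};

/* example usage: cxgb_register_client(&example_client); */
#endif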
  137 
  138 /**
  139  *      cxgb_unregister_client - unregister an offload client
  140  *      @client: the client
  141  *
   142  *      Remove the client from the client list and call back the client
   143  *      for each activated offload device.
  144  */
  145 void
  146 cxgb_unregister_client(struct cxgb_client *client)
  147 {
  148         struct t3cdev *tdev;
  149 
  150         mtx_lock(&cxgb_db_lock);
  151         TAILQ_REMOVE(&client_list, client, client_entry);
  152 
  153         if (client->remove) {
  154                 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
  155                         if (offload_activated(tdev))
  156                                 client->remove(tdev);
  157                 }
  158         }
  159         mtx_unlock(&cxgb_db_lock);
  160 }
  161 
  162 /**
   163  *      cxgb_add_clients - activate registered clients for an offload device
  164  *      @tdev: the offload device
  165  *
   166  *      Call back all registered clients once an offload device is activated.
  167  */
  168 void
  169 cxgb_add_clients(struct t3cdev *tdev)
  170 {
  171         struct cxgb_client *client;
  172 
  173         mtx_lock(&cxgb_db_lock);
  174         TAILQ_FOREACH(client, &client_list, client_entry) {
  175                 if (client->add)
  176                         client->add(tdev);
  177         }
  178         mtx_unlock(&cxgb_db_lock);
  179 }
  180 
  181 /**
   182  *      cxgb_remove_clients - deactivate registered clients for an offload device
  183  *      @tdev: the offload device
  184  *
   185  *      Call back all registered clients once an offload device is deactivated.
  186  */
  187 void
  188 cxgb_remove_clients(struct t3cdev *tdev)
  189 {
  190         struct cxgb_client *client;
  191 
  192         mtx_lock(&cxgb_db_lock);
  193         TAILQ_FOREACH(client, &client_list, client_entry) {
  194                 if (client->remove)
  195                         client->remove(tdev);
  196         }
  197         mtx_unlock(&cxgb_db_lock);
  198 }
  199 #endif
  200 
  201 /**
  202  * cxgb_ofld_recv - process n received offload packets
  203  * @dev: the offload device
  204  * @m: an array of offload packets
  205  * @n: the number of offload packets
  206  *
   207  * Process an array of ingress offload packets.  The whole array is
   208  * handed to the offload device's receive method in a single call,
   209  * so the device can process the batch at once rather than one
   210  * packet at a time.
  211  */
  212 int
  213 cxgb_ofld_recv(struct t3cdev *dev, struct mbuf **m, int n)
  214 {
  215 
  216         return dev->recv(dev, m, n);
  217 }
  218 
  219 /*
   220  * Dummy handler for RX offload packets in case we get an offload packet before
   221  * proper processing is set up.  It simply drops the packets, as it isn't
   222  * normal to get offload packets at this stage.
  223  */
  224 static int
  225 rx_offload_blackhole(struct t3cdev *dev, struct mbuf **m, int n)
  226 {
  227         while (n--)
  228                 m_freem(m[n]);
  229         return 0;
  230 }
  231 
  232 static void
  233 dummy_neigh_update(struct t3cdev *dev, struct rtentry *neigh, uint8_t *enaddr,
  234     struct sockaddr *sa)
  235 {
  236 }
  237 
  238 void
  239 cxgb_set_dummy_ops(struct t3cdev *dev)
  240 {
  241         dev->recv         = rx_offload_blackhole;
  242         dev->arp_update = dummy_neigh_update;
  243 }
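/*
 * Illustrative sketch, not part of the driver: with the dummy ops installed
 * above, an early call to cxgb_ofld_recv() lands in rx_offload_blackhole()
 * and the mbufs are silently freed.  The helper name and the mbuf allocation
 * below are hypothetical and only meant to show the call sequence.
 */
#if 0
static void
example_early_recv(struct t3cdev *dev)
{
        struct mbuf *m;

        cxgb_set_dummy_ops(dev);
        m = m_gethdr(M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return;
        /* The blackhole handler frees m and returns 0. */
        (void)cxgb_ofld_recv(dev, &m, 1);
}
#endif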
  244 
  245 static int
  246 do_smt_write_rpl(struct t3cdev *dev, struct mbuf *m)
  247 {
  248         struct cpl_smt_write_rpl *rpl = cplhdr(m);
  249 
  250         if (rpl->status != CPL_ERR_NONE)
  251                 log(LOG_ERR,
  252                        "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
  253                        rpl->status, GET_TID(rpl));
  254 
  255         return CPL_RET_BUF_DONE;
  256 }
  257 
  258 static int
  259 do_l2t_write_rpl(struct t3cdev *dev, struct mbuf *m)
  260 {
  261         struct cpl_l2t_write_rpl *rpl = cplhdr(m);
  262 
  263         if (rpl->status != CPL_ERR_NONE)
  264                 log(LOG_ERR,
  265                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
  266                        rpl->status, GET_TID(rpl));
  267 
  268         return CPL_RET_BUF_DONE;
  269 }
  270 
  271 static int
  272 do_rte_write_rpl(struct t3cdev *dev, struct mbuf *m)
  273 {
  274         struct cpl_rte_write_rpl *rpl = cplhdr(m);
  275 
  276         if (rpl->status != CPL_ERR_NONE)
  277                 log(LOG_ERR,
   278                        "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
  279                        rpl->status, GET_TID(rpl));
  280 
  281         return CPL_RET_BUF_DONE;
  282 }
  283 
  284 static int
  285 do_set_tcb_rpl(struct t3cdev *dev, struct mbuf *m)
  286 {
  287         struct cpl_set_tcb_rpl *rpl = cplhdr(m);
  288 
  289         if (rpl->status != CPL_ERR_NONE)
  290                 log(LOG_ERR,
  291                     "Unexpected SET_TCB_RPL status %u for tid %u\n",
  292                         rpl->status, GET_TID(rpl));
  293         return CPL_RET_BUF_DONE;
  294 }
  295 
  296 static int
  297 do_trace(struct t3cdev *dev, struct mbuf *m)
  298 {
   299 #if 0   /* Disabled: leftover skb-based code from the Linux driver. */
   300         struct cpl_trace_pkt *p = cplhdr(m);
   301 
   302 
   303         skb->protocol = 0xffff;
   304         skb->dev = dev->lldev;
   305         skb_pull(skb, sizeof(*p));
   306         skb->mac.raw = mtod(m, char *);
   307         netif_receive_skb(skb);
   308 #endif
  309         return 0;
  310 }
  311 
  312 /*
  313  * Process a received packet with an unknown/unexpected CPL opcode.
  314  */
  315 static int
  316 do_bad_cpl(struct t3cdev *dev, struct mbuf *m)
  317 {
  318         log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
  319             0xFF & *mtod(m, uint32_t *));
  320         return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
  321 }
  322 
  323 /*
  324  * Handlers for each CPL opcode
  325  */
  326 static cpl_handler_func cpl_handlers[256];
  327 
  328 /*
  329  * T3CDEV's receive method.
  330  */
  331 int
  332 process_rx(struct t3cdev *dev, struct mbuf **m, int n)
  333 {
  334         while (n--) {
  335                 struct mbuf *m0 = *m++;
  336                 unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
  337                 int ret;
  338 
  339                 DPRINTF("processing op=0x%x m=%p data=%p\n", opcode, m0, m0->m_data);
  340                 
  341                 ret = cpl_handlers[opcode] (dev, m0);
  342 
  343 #if VALIDATE_TID
  344                 if (ret & CPL_RET_UNKNOWN_TID) {
  345                         union opcode_tid *p = cplhdr(m0);
  346 
  347                         log(LOG_ERR, "%s: CPL message (opcode %u) had "
  348                                "unknown TID %u\n", dev->name, opcode,
  349                                G_TID(ntohl(p->opcode_tid)));
  350                 }
  351 #endif
  352                 if (ret & CPL_RET_BUF_DONE)
  353                         m_freem(m0);
  354         }
  355         return 0;
  356 }
  357 
  358 /*
  359  * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
  360  * to unregister an existing handler.
  361  */
  362 void
  363 t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
  364 {
  365         if (opcode < NUM_CPL_CMDS)
  366                 cpl_handlers[opcode] = h ? h : do_bad_cpl;
  367         else
  368                 log(LOG_ERR, "T3C: handler registration for "
  369                        "opcode %x failed\n", opcode);
  370 }
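/*
 * Illustrative sketch, not part of the driver: installing and removing a
 * private handler through the dispatch table above.  The counter and the
 * handler are hypothetical; CPL_SET_TCB_RPL is reused here only because it
 * already appears in this file.  Returning CPL_RET_BUF_DONE tells
 * process_rx() to free the mbuf.
 */
#if 0
static int example_set_tcb_rpl_count;

static int
example_set_tcb_rpl(struct t3cdev *dev, struct mbuf *m)
{
        example_set_tcb_rpl_count++;
        return (CPL_RET_BUF_DONE);
}

/* install:  t3_register_cpl_handler(CPL_SET_TCB_RPL, example_set_tcb_rpl); */
/* restore the do_bad_cpl default:  t3_register_cpl_handler(CPL_SET_TCB_RPL, NULL); */
#endif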
  371 
  372 /*
   373  * Allocate a chunk of memory with malloc(9).  The allocated memory is
   374  * cleared; the M_NOWAIT allocation may fail and return NULL.
  375  */
  376 void *
  377 cxgb_alloc_mem(unsigned long size)
  378 {
  379 
  380         return malloc(size, M_CXGB, M_ZERO|M_NOWAIT);
  381 }
  382 
  383 /*
   384  * Free memory allocated through cxgb_alloc_mem().
  385  */
  386 void
  387 cxgb_free_mem(void *addr)
  388 {
  389         free(addr, M_CXGB);
  390 }
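/*
 * Illustrative sketch, not part of the driver: cxgb_alloc_mem() allocates
 * with M_NOWAIT, so callers must handle a NULL return.  The helper and its
 * argument are hypothetical.
 */
#if 0
static void *
example_alloc_table(unsigned long nbytes)
{
        void *tab = cxgb_alloc_mem(nbytes);

        if (tab == NULL)
                return (NULL);  /* allocation can fail under memory pressure */
        /* ... use the zeroed table, later release it with cxgb_free_mem(tab) ... */
        return (tab);
}
#endif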
  391 
  392 static __inline int
  393 adap2type(struct adapter *adapter) 
  394 { 
  395         int type = 0; 
  396  
  397         switch (adapter->params.rev) { 
  398         case T3_REV_A: 
  399                 type = T3A; 
  400                 break; 
  401         case T3_REV_B: 
  402         case T3_REV_B2: 
  403                 type = T3B; 
  404                 break; 
  405         case T3_REV_C: 
  406                 type = T3C; 
  407                 break; 
  408         } 
  409         return type; 
  410 }
  411 
  412 void
  413 cxgb_adapter_ofld(struct adapter *adapter)
  414 {
  415         struct t3cdev *tdev = &adapter->tdev;
  416 
  417         cxgb_set_dummy_ops(tdev);
  418         tdev->type = adap2type(adapter);
  419         tdev->adapter = adapter;
  420         register_tdev(tdev);    
  421 
  422 }
  423 
  424 void
  425 cxgb_adapter_unofld(struct adapter *adapter)
  426 {
  427         struct t3cdev *tdev = &adapter->tdev;
  428 
  429         tdev->recv = NULL;
  430         tdev->arp_update = NULL;
  431         unregister_tdev(tdev);  
  432 }
  433 
  434 void
  435 cxgb_offload_init(void)
  436 {
  437         int i;
  438 
  439         if (inited++)
  440                 return;
  441         
  442         mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
  443 
  444         TAILQ_INIT(&client_list);
  445         TAILQ_INIT(&ofld_dev_list);
  446         
  447         for (i = 0; i < 0x100; ++i)
  448                 cpl_handlers[i] = do_bad_cpl;
  449         
  450         t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
  451         t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
  452         t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
  453 
  454         t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
  455         t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
  456         
  457 }
  458 
  459 void 
  460 cxgb_offload_exit(void)
  461 {
  462 
  463         if (--inited)
  464                 return;
  465 
  466         mtx_destroy(&cxgb_db_lock);
  467 }
  468 
  469 MODULE_VERSION(if_cxgb, 1);
