The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/adapter.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2011 Chelsio Communications, Inc.
    3  * All rights reserved.
    4  * Written by: Navdeep Parhar <np@FreeBSD.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  * $FreeBSD: releng/8.4/sys/dev/cxgbe/adapter.h 247670 2013-03-02 21:59:07Z np $
   28  *
   29  */
   30 
   31 #ifndef __T4_ADAPTER_H__
   32 #define __T4_ADAPTER_H__
   33 
   34 #include <sys/kernel.h>
   35 #include <sys/bus.h>
   36 #include <sys/rman.h>
   37 #include <sys/types.h>
   38 #include <sys/malloc.h>
   39 #include <dev/pci/pcivar.h>
   40 #include <dev/pci/pcireg.h>
   41 #include <machine/bus.h>
   42 #include <sys/socket.h>
   43 #include <sys/sysctl.h>
   44 #include <net/ethernet.h>
   45 #include <net/if.h>
   46 #include <net/if_media.h>
   47 #include <netinet/in.h>
   48 #include <netinet/tcp_lro.h>
   49 
   50 #include "offload.h"
   51 #include "firmware/t4fw_interface.h"
   52 
/* Base names of the firmware configuration and firmware image files
 * (looked up via firmware(9)). */
#define T4_CFGNAME "t4fw_cfg"
#define T4_FWNAME "t4fw"

/* malloc(9) type for all allocations made by this driver. */
MALLOC_DECLARE(M_CXGBE);
/* Marker for code paths that are not written yet: panics with the location. */
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
   59 
#if defined(__i386__) || defined(__amd64__)
/*
 * Hint the CPU to pull the cache line containing *x closer (prefetcht0).
 * Advisory only; has no architectural side effects.
 */
static __inline void
prefetch(void *x)
{
        __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
/* No prefetch instruction wrapper on other architectures: expands to nothing. */
#define prefetch(x)
#endif
   69 
/*
 * Compatibility shims so this file builds on older FreeBSD branches
 * (this copy is from releng/8.4).
 */

/* Older kernels spell the unsigned 64-bit sysctl interfaces "QUAD". */
#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#define CTLTYPE_U64 CTLTYPE_QUAD
#endif

/* SBUF_DRAIN: set on branches whose sbuf(9) supports drain functions. */
#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif

/* IPv6 checksum-offload capability bits do not exist before 9.0; define
 * them away so the capability arithmetic below still compiles. */
#if (__FreeBSD_version < 900000)
#define IFCAP_RXCSUM_IPV6 0
#define IFCAP_TXCSUM_IPV6 0
#define CSUM_DATA_VALID_IPV6 0
#define IFCAP_HWCSUM_IPV6 0
#define CSUM_UDP_IPV6 0
#define CSUM_TCP_IPV6 0
#endif
   89 
#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
/*
 * 64-bit register access helpers.  On amd64 the BAR is memory mapped and a
 * direct 64-bit load/store through the handle is used.
 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
        KASSERT(tag == AMD64_BUS_SPACE_MEM,
            ("%s: can only handle mem space", __func__));

        return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
        KASSERT(tag == AMD64_BUS_SPACE_MEM,
            ("%s: can only handle mem space", __func__));

        *(volatile uint64_t *)(bsh + offset) = value;
}
#else
/*
 * Other architectures: synthesize the 64-bit access from two 32-bit
 * bus_space accesses — low word at 'offset', high word at 'offset + 4'.
 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
        return (uint64_t)bus_space_read_4(tag, handle, offset) +
            ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
        /* low word first, then high word */
        bus_space_write_4(tag, bsh, offset, value);
        bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif
  128 
struct adapter;                         /* defined later in this file */
typedef struct adapter adapter_t;

/*
 * Queue and buffer sizing constants.  QSIZE values are in entries,
 * ESIZE values are the per-entry size in bytes.
 */
enum {
        FW_IQ_QSIZE = 256,
        FW_IQ_ESIZE = 64,       /* At least 64 mandated by the firmware spec */

        RX_IQ_QSIZE = 1024,
        RX_IQ_ESIZE = 64,       /* At least 64 so CPL_RX_PKT will fit */

        EQ_ESIZE = 64,          /* All egress queues use this entry size */

        RX_FL_ESIZE = EQ_ESIZE, /* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
        FL_BUF_SIZES = 4,       /* cluster, jumbop, jumbo9k, jumbo16k */
#else
        FL_BUF_SIZES = 3,       /* cluster, jumbo9k, jumbo16k */
#endif
        OFLD_BUF_SIZE = MJUM16BYTES,    /* size of fl buffer for TOE rxq */

        CTRL_EQ_QSIZE = 128,

        TX_EQ_QSIZE = 1024,
        TX_SGL_SEGS = 36,
        TX_WR_FLITS = SGE_MAX_WR_LEN / 8        /* WR size in 8-byte flits */
};
  155 
/*
 * Size threshold (bytes) for copying rx data instead of handing the cluster
 * up the stack — see the rx path in t4_sge.c.  With packet timestamping on,
 * 8 bytes are reserved for the timestamp.
 */
#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

enum {
        /* adapter intr_type */
        INTR_INTX       = (1 << 0),     /* legacy line interrupt */
        INTR_MSI        = (1 << 1),
        INTR_MSIX       = (1 << 2)
};
  168 
/*
 * Flags for begin_synchronized_op()/end_synchronized_op() (see t4_main.c).
 */
enum {
        /* flags understood by begin_synchronized_op */
        HOLD_LOCK       = (1 << 0),
        SLEEP_OK        = (1 << 1),
        INTR_OK         = (1 << 2),

        /* flags understood by end_synchronized_op */
        LOCK_HELD       = HOLD_LOCK,
};

/*
 * Bits for adapter->flags and port_info->flags.  Note the two namespaces
 * overlap numerically; they are kept in the same enum for convenience only.
 */
enum {
        /* adapter flags */
        FULL_INIT_DONE  = (1 << 0),
        FW_OK           = (1 << 1),
        INTR_DIRECT     = (1 << 2),     /* direct interrupts for everything */
        MASTER_PF       = (1 << 3),
        ADAP_SYSCTL_CTX = (1 << 4),
        TOM_INIT_DONE   = (1 << 5),

        CXGBE_BUSY      = (1 << 9),

        /* port flags */
        DOOMED          = (1 << 0),
        PORT_INIT_DONE  = (1 << 1),
        PORT_SYSCTL_CTX = (1 << 2),
};

/* Accessors for the DOOMED (port) and CXGBE_BUSY (adapter) flag bits. */
#define IS_DOOMED(pi)   ((pi)->flags & DOOMED)
#define SET_DOOMED(pi)  do {(pi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)     ((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)    do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)    do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
  201 
/*
 * Per-port state.  One of these exists for each network port on the
 * adapter; adapter->port[] points at them.
 */
struct port_info {
        device_t dev;
        struct adapter *adapter;        /* backpointer to owning adapter */

        struct ifnet *ifp;
        struct ifmedia media;

        struct mtx pi_lock;
        char lockname[16];
        unsigned long flags;            /* DOOMED, PORT_INIT_DONE, ... */
        int if_flags;

        uint16_t viid;
        int16_t  xact_addr_filt;/* index of exact MAC address filter */
        uint16_t rss_size;      /* size of VI's RSS table slice */
        uint8_t  lport;         /* associated offload logical port */
        int8_t   mdio_addr;
        uint8_t  port_type;
        uint8_t  mod_type;
        uint8_t  port_id;
        uint8_t  tx_chan;

        /* These need to be int as they are used in sysctl */
        int ntxq;       /* # of tx queues */
        int first_txq;  /* index of first tx queue */
        int nrxq;       /* # of rx queues */
        int first_rxq;  /* index of first rx queue */
#ifdef TCP_OFFLOAD
        int nofldtxq;           /* # of offload tx queues */
        int first_ofld_txq;     /* index of first offload tx queue */
        int nofldrxq;           /* # of offload rx queues */
        int first_ofld_rxq;     /* index of first offload rx queue */
#endif
        int tmr_idx;
        int pktc_idx;
        int qsize_rxq;
        int qsize_txq;

        struct link_config link_cfg;
        struct port_stats stats;

        struct callout tick;
        struct sysctl_ctx_list ctx;     /* from ifconfig up to driver detach */

        uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};
  248 
/* Software state for one free list buffer. */
struct fl_sdesc {
        struct mbuf *m;
        bus_dmamap_t map;
        caddr_t cl;             /* the cluster backing this entry */
        uint8_t tag_idx;        /* the sc->fl_tag this map comes from */
#ifdef INVARIANTS
        __be64 ba_tag;
#endif
};

/* One hardware tx descriptor: 8 big-endian 64-bit flits (64 bytes). */
struct tx_desc {
        __be64 flit[8];
};

/* An mbuf and the DMA map it is loaded into. */
struct tx_map {
        struct mbuf *m;
        bus_dmamap_t map;
};

/* DMA maps used for tx */
struct tx_maps {
        struct tx_map *maps;
        uint32_t map_total;     /* # of DMA maps */
        uint32_t map_pidx;      /* next map to be used */
        uint32_t map_cidx;      /* reclaimed up to this index */
        uint32_t map_avail;     /* # of available maps */
};

/* Software state for one tx work request. */
struct tx_sdesc {
        uint8_t desc_used;      /* # of hardware descriptors used by the WR */
        uint8_t credits;        /* NIC txq: # of frames sent out in the WR */
};
  281 
enum {
        /* iq flags */
        IQ_ALLOCATED    = (1 << 0),     /* firmware resources allocated */
        IQ_HAS_FL       = (1 << 1),     /* iq associated with a freelist */
        IQ_INTR         = (1 << 2),     /* iq takes direct interrupt */
        IQ_LRO_ENABLED  = (1 << 3),     /* iq is an eth rxq with LRO enabled */

        /* iq state */
        IQS_DISABLED    = 0,
        IQS_BUSY        = 1,
        IQS_IDLE        = 2,
};

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_addr_t ba;          /* bus address of descriptor ring */
        uint32_t flags;         /* IQ_* flags above */
        uint16_t abs_id;        /* absolute SGE id for the iq */
        int8_t   intr_pktc_idx; /* packet count threshold index */
        int8_t   pad0;
        __be64  *desc;          /* KVA of descriptor ring */

        volatile int state;     /* IQS_* state above */
        struct adapter *adapter;
        const __be64 *cdesc;    /* current descriptor */
        uint8_t  gen;           /* generation bit */
        uint8_t  intr_params;   /* interrupt holdoff parameters */
        uint8_t  intr_next;     /* XXX: holdoff for next interrupt */
        uint8_t  esize;         /* size (bytes) of each entry in the queue */
        uint16_t qsize;         /* size (# of entries) of the queue */
        uint16_t cidx;          /* consumer index */
        uint16_t cntxt_id;      /* SGE context id for the iq */

        STAILQ_ENTRY(sge_iq) link;
};
  321 
enum {
        /* eq types (stored in the low bits of eq->flags) */
        EQ_CTRL         = 1,
        EQ_ETH          = 2,
#ifdef TCP_OFFLOAD
        EQ_OFLD         = 3,
#endif

        /* eq flags */
        EQ_TYPEMASK     = 7,            /* 3 lsbits hold the type */
        EQ_ALLOCATED    = (1 << 3),     /* firmware resources allocated */
        EQ_DOOMED       = (1 << 4),     /* about to be destroyed */
        EQ_CRFLUSHED    = (1 << 5),     /* expecting an update from SGE */
        EQ_STALLED      = (1 << 6),     /* out of hw descriptors or dmamaps */
};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
        unsigned int flags;     /* MUST be first */
        unsigned int cntxt_id;  /* SGE context id for the eq */
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        char lockname[16];
        struct mtx eq_lock;

        struct tx_desc *desc;   /* KVA of descriptor ring */
        bus_addr_t ba;          /* bus address of descriptor ring */
        struct sge_qstat *spg;  /* status page, for convenience */
        uint16_t cap;           /* max # of desc, for convenience */
        uint16_t avail;         /* available descriptors, for convenience */
        uint16_t qsize;         /* size (# of entries) of the queue */
        uint16_t cidx;          /* consumer idx (desc idx) */
        uint16_t pidx;          /* producer idx (desc idx) */
        uint16_t pending;       /* # of descriptors used since last doorbell */
        uint16_t iqid;          /* iq that gets egr_update for the eq */
        uint8_t tx_chan;        /* tx channel used by the eq */
        struct task tx_task;
        struct callout tx_callout;

        /* stats */

        uint32_t egr_update;    /* # of SGE_EGR_UPDATE notifications for eq */
        uint32_t unstalled;     /* recovered from stall */
};
  370 
enum {
        /* fl flags */
        FL_STARVING     = (1 << 0), /* on the adapter's list of starving fl's */
        FL_DOOMED       = (1 << 1), /* about to be destroyed */
};

/* Hysteresis thresholds for the starving-freelist machinery. */
#define FL_RUNNING_LOW(fl)      (fl->cap - fl->needed <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl)  (fl->cap - fl->needed >= 2 * fl->lowat)

/*
 * Free list: an egress queue of rx buffer addresses (driver produces,
 * hardware consumes).  See the note above struct sge_eq.
 */
struct sge_fl {
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_dma_tag_t tag[FL_BUF_SIZES];        /* one tag per buffer size */
        uint8_t tag_idx;
        struct mtx fl_lock;
        char lockname[16];
        int flags;              /* FL_* flags above */

        __be64 *desc;           /* KVA of descriptor ring, ptr to addresses */
        bus_addr_t ba;          /* bus address of descriptor ring */
        struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
        uint32_t cap;           /* max # of buffers, for convenience */
        uint16_t qsize;         /* size (# of entries) of the queue */
        uint16_t cntxt_id;      /* SGE context id for the freelist */
        uint32_t cidx;          /* consumer idx (buffer idx, NOT hw desc idx) */
        uint32_t pidx;          /* producer idx (buffer idx, NOT hw desc idx) */
        uint32_t needed;        /* # of buffers needed to fill up fl. */
        uint32_t lowat;         /* # of buffers <= this means fl needs help */
        uint32_t pending;       /* # of bufs allocated since last doorbell */
        unsigned int dmamap_failed;
        TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
};
  402 
/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
        struct sge_eq eq;       /* MUST be first */

        struct ifnet *ifp;      /* the interface this txq belongs to */
        bus_dma_tag_t tx_tag;   /* tag for transmit buffers */
        struct buf_ring *br;    /* tx buffer ring */
        struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
        struct mbuf *m;         /* held up due to temporary resource shortage */

        struct tx_maps txmaps;

        /* stats for common events first */

        uint64_t txcsum;        /* # of times hardware assisted with checksum */
        uint64_t tso_wrs;       /* # of TSO work requests */
        uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
        uint64_t imm_wrs;       /* # of work requests with immediate data */
        uint64_t sgl_wrs;       /* # of work requests with direct SGL */
        uint64_t txpkt_wrs;     /* # of txpkt work requests (not coalesced) */
        uint64_t txpkts_wrs;    /* # of coalesced tx work requests */
        uint64_t txpkts_pkts;   /* # of frames in coalesced tx work requests */

        /* stats for not-that-common events */

        uint32_t no_dmamap;     /* no DMA map to load the mbuf */
        uint32_t no_desc;       /* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);
  431 
/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
        struct sge_iq iq;       /* MUST be first */
        struct sge_fl fl;       /* MUST follow iq */

        struct ifnet *ifp;      /* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
        struct lro_ctrl lro;    /* LRO state */
#endif

        /* stats for common events first */

        uint64_t rxcsum;        /* # of times hardware assisted with checksum */
        uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

        /* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);
  450 
/* Recover the containing sge_rxq from a pointer to its embedded iq. */
static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

        return (member2struct(sge_rxq, iq, iq));
}
  457 
  458 
#ifdef TCP_OFFLOAD
/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
        struct sge_iq iq;       /* MUST be first */
        struct sge_fl fl;       /* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

/* Recover the containing sge_ofld_rxq from a pointer to its embedded iq. */
static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

        return (member2struct(sge_ofld_rxq, iq, iq));
}
#endif
  473 
/*
 * A prebuilt work request.  wr[] is a 16-byte aligned flexible array
 * holding wr_len bytes of WR payload; see alloc_wrqe()/wrtod() below.
 */
struct wrqe {
        STAILQ_ENTRY(wrqe) link;
        struct sge_wrq *wrq;    /* queue this WR is destined for */
        int wr_len;             /* payload length in bytes */
        uint64_t wr[] __aligned(16);
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the control
 * and offload tx queues are of this type.
 */
struct sge_wrq {
        struct sge_eq eq;       /* MUST be first */

        struct adapter *adapter;

        /* List of WRs held up due to lack of tx descriptors */
        STAILQ_HEAD(, wrqe) wr_list;

        /* stats for common events first */

        uint64_t tx_wrs;        /* # of tx work requests */

        /* stats for not-that-common events */

        uint32_t no_desc;       /* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);
  501 
/* Adapter-wide SGE state: queue counts, the queues themselves, and the
 * context-id -> queue lookup tables. */
struct sge {
        int timer_val[SGE_NTIMERS];     /* interrupt holdoff timer values */
        int counter_val[SGE_NCOUNTERS]; /* interrupt packet-count thresholds */
        int fl_starve_threshold;

        int nrxq;       /* total # of Ethernet rx queues */
        int ntxq;       /* total # of Ethernet tx tx queues */
#ifdef TCP_OFFLOAD
        int nofldrxq;   /* total # of TOE rx queues */
        int nofldtxq;   /* total # of TOE tx queues */
#endif
        int niq;        /* total # of ingress queues */
        int neq;        /* total # of egress queues */

        struct sge_iq fwq;      /* Firmware event queue */
        struct sge_wrq mgmtq;   /* Management queue (control queue) */
        struct sge_wrq *ctrlq;  /* Control queues */
        struct sge_txq *txq;    /* NIC tx queues */
        struct sge_rxq *rxq;    /* NIC rx queues */
#ifdef TCP_OFFLOAD
        struct sge_wrq *ofld_txq;       /* TOE tx queues */
        struct sge_ofld_rxq *ofld_rxq;  /* TOE rx queues */
#endif

        uint16_t iq_start;
        int eq_start;
        struct sge_iq **iqmap;  /* iq->cntxt_id to iq mapping */
        struct sge_eq **eqmap;  /* eq->cntxt_id to eq mapping */
};

struct rss_header;
/* Handler for a CPL message delivered on an ingress queue. */
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
/* Handler for an asynchronous notification. */
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
/* Handler for a firmware message (payload is raw big-endian flits). */
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
  537 
/*
 * Per-adapter (per PCI function) softc.  One of these exists for every
 * T4 device claimed by the driver.
 */
struct adapter {
        SLIST_ENTRY(adapter) link;
        device_t dev;
        struct cdev *cdev;      /* /dev node for the control ioctls */

        /* PCIe register resources */
        int regs_rid;
        struct resource *regs_res;
        int msix_rid;
        struct resource *msix_res;
        bus_space_handle_t bh;
        bus_space_tag_t bt;
        bus_size_t mmio_len;

        unsigned int pf;        /* physical function number */
        unsigned int mbox;      /* mailbox used to talk to the firmware */

        /* Interrupt information */
        int intr_type;          /* INTR_INTX, INTR_MSI, or INTR_MSIX */
        int intr_count;
        struct irq {
                struct resource *res;
                int rid;
                void *tag;
        } *irq;

        bus_dma_tag_t dmat;     /* Parent DMA tag */

        struct sge sge;

        struct taskqueue *tq[NCHAN];    /* taskqueues that flush data out */
        struct port_info *port[MAX_NPORTS];
        uint8_t chan_map[NCHAN];
        uint32_t filter_mode;

#ifdef TCP_OFFLOAD
        void *tom_softc;        /* (struct tom_data *) */
        struct tom_tunables tt;
#endif
        struct l2t_data *l2t;   /* L2 table */
        struct tid_info tids;

        int open_device_map;
#ifdef TCP_OFFLOAD
        int offload_map;
#endif
        int flags;              /* FULL_INIT_DONE, FW_OK, ... */

        char fw_version[32];
        char cfg_file[32];
        u_int cfcsum;           /* checksum of the config file in use */
        struct adapter_params params;
        struct t4_virt_res vres;

        /* capability bitmaps negotiated with the firmware */
        uint16_t linkcaps;
        uint16_t niccaps;
        uint16_t toecaps;
        uint16_t rdmacaps;
        uint16_t iscsicaps;
        uint16_t fcoecaps;

        struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

        struct mtx sc_lock;
        char lockname[16];

        /* Starving free lists */
        struct mtx sfl_lock;    /* same cache-line as sc_lock? but that's ok */
        TAILQ_HEAD(, sge_fl) sfl;
        struct callout sfl_callout;

        /* Dispatch tables, registered via t4_register_*_handler(). */
        an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
        fw_msg_handler_t fw_msg_handler[5];     /* NUM_FW6_TYPES */
        cpl_handler_t cpl_handler[0xef];        /* NUM_CPL_CMDS */

#ifdef INVARIANTS
        const char *last_op;            /* name of last synchronized op */
        const void *last_op_thr;        /* thread that performed it */
#endif
};
  618 
/* Adapter lock: protects adapter-wide state and the synchronized-op flags. */
#define ADAPTER_LOCK(sc)                mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)              mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)   mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

/* XXX: not bulletproof, but much better than nothing */
#define ASSERT_SYNCHRONIZED_OP(sc)      \
    KASSERT(IS_BUSY(sc) && \
        (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
        ("%s: operation not synchronized.", __func__))

/* Per-port lock. */
#define PORT_LOCK(pi)                   mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)                 mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)      mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)   mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

/* Free list lock. */
#define FL_LOCK(fl)                     mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)                  mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)                   mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)        mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)     mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

/* Convenience wrappers for the free list embedded in an rxq. */
#define RXQ_FL_LOCK(rxq)                FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)              FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)   FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

/* Egress queue lock. */
#define EQ_LOCK(eq)                     mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)                  mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)                   mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)        mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)     mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

/* A txq is locked via the eq embedded in it (eq is the first member). */
#define TXQ_LOCK(txq)                   EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)                EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)                 EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)      EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)   EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

/* Iterate over a port's slice of the adapter's queue arrays. */
#define for_each_txq(pi, iter, q) \
        for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
            iter < pi->ntxq; ++iter, ++q)
#define for_each_rxq(pi, iter, q) \
        for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
            iter < pi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(pi, iter, q) \
        for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
            iter < pi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(pi, iter, q) \
        for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
            iter < pi->nofldrxq; ++iter, ++q)

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2
  673 
/* 32-bit read of adapter register 'reg' (BAR0 offset). */
static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

        return bus_space_read_4(sc->bt, sc->bh, reg);
}

/* 32-bit write of 'val' to adapter register 'reg'. */
static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

        bus_space_write_4(sc->bt, sc->bh, reg, val);
}

/* 64-bit read; see t4_bus_space_read_8() for the per-arch implementation. */
static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

        return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}

/* 64-bit write; see t4_bus_space_write_8() for the per-arch implementation. */
static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

        t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}
  701 
/*
 * PCI config space accessors (1, 2, and 4 byte widths), thin wrappers
 * around pci_read_config()/pci_write_config().
 */
static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

        *val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

        pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

        *val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

        pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

        *val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

        pci_write_config(sc->dev, reg, val, 4);
}
  743 
  744 static inline struct port_info *
  745 adap2pinfo(struct adapter *sc, int idx)
  746 {
  747 
  748         return (sc->port[idx]);
  749 }
  750 
  751 static inline void
  752 t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
  753 {
  754 
  755         bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
  756 }
  757 
  758 static inline bool is_10G_port(const struct port_info *pi)
  759 {
  760 
  761         return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
  762 }
  763 
  764 static inline int tx_resume_threshold(struct sge_eq *eq)
  765 {
  766 
  767         return (eq->qsize / 4);
  768 }
  769 
/*
 * Functions this header exports; implementations live in the files named
 * in the section comments below.
 */

/* t4_main.c */
void t4_tx_task(void *, int);
void t4_tx_callout(void *);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
void end_synchronized_op(struct adapter *, int);

/* t4_sge.c */
void t4_sge_modload(void);
int t4_sge_init(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_port_queues(struct port_info *);
int t4_teardown_port_queues(struct port_info *);
int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
int can_resume_tx(struct sge_eq *);
  805 
  806 static inline struct wrqe *
  807 alloc_wrqe(int wr_len, struct sge_wrq *wrq)
  808 {
  809         int len = offsetof(struct wrqe, wr) + wr_len;
  810         struct wrqe *wr;
  811 
  812         wr = malloc(len, M_CXGBE, M_NOWAIT);
  813         if (__predict_false(wr == NULL))
  814                 return (NULL);
  815         wr->wr_len = wr_len;
  816         wr->wrq = wrq;
  817         return (wr);
  818 }
  819 
  820 static inline void *
  821 wrtod(struct wrqe *wr)
  822 {
  823         return (&wr->wr[0]);
  824 }
  825 
/* Free a work request entry obtained from alloc_wrqe(). */
static inline void
free_wrqe(struct wrqe *wr)
{
        free(wr, M_CXGBE);
}
  831 
  832 static inline void
  833 t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
  834 {
  835         struct sge_wrq *wrq = wr->wrq;
  836 
  837         TXQ_LOCK(wrq);
  838         t4_wrq_tx_locked(sc, wrq, wr);
  839         TXQ_UNLOCK(wrq);
  840 }
  841 
  842 #endif

Cache object: c7246beedebb1153ca87c95f0f088e0a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.