The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/qlnx/qlnxe/ecore_roce_api.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2017-2018 Cavium, Inc. 
    3  * All rights reserved.
    4  *
    5  *  Redistribution and use in source and binary forms, with or without
    6  *  modification, are permitted provided that the following conditions
    7  *  are met:
    8  *
    9  *  1. Redistributions of source code must retain the above copyright
   10  *     notice, this list of conditions and the following disclaimer.
   11  *  2. Redistributions in binary form must reproduce the above copyright
   12  *     notice, this list of conditions and the following disclaimer in the
   13  *     documentation and/or other materials provided with the distribution.
   14  *
   15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   25  *  POSSIBILITY OF SUCH DAMAGE.
   26  *
   27  * $FreeBSD$
   28  *
   29  */
   30 
#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef ETH_ALEN
#define ETH_ALEN 6 /* Length of an Ethernet MAC address, in bytes */
#endif
   37 
/* Destination selector for a Light L2 transmit packet. */
enum ecore_roce_ll2_tx_dest
{
        ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
        ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
        ECORE_ROCE_LL2_TX_DEST_MAX /* Sentinel - number of valid destinations */
};
   44 
/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size Limitation
 * The CNQ size should be set as twice the amount of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate for a possible race in the arm mechanism.
 * The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
 * that the number of QPs can reach 32k giving 64k CQs and 128k CNQ elements.
 * Luckily the FW can buffer CNQ elements avoiding an overflow, at the expense
 * of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
   56 
   57 /* rdma interface */
/* Type of the object a TID (task/MR identifier) is registered for. */
enum ecore_rdma_tid_type
{
        ECORE_RDMA_TID_REGISTERED_MR, /* Regular registered Memory Region */
        ECORE_RDMA_TID_FMR,           /* Fast Memory Region */
        ECORE_RDMA_TID_MW_TYPE1,      /* Memory Window, type 1 */
        ECORE_RDMA_TID_MW_TYPE2A      /* Memory Window, type 2A */
};
   65 
/* RoCE QP state machine states (mirrors the standard IB QP states). */
enum ecore_roce_qp_state {
        ECORE_ROCE_QP_STATE_RESET, /* Reset */
        ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
        ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
        ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
        ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
        ECORE_ROCE_QP_STATE_ERR,   /* Error */
        ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};
   75 
/* Callback for events affiliated with a specific RDMA object;
 * fw_handle identifies the FW object the event relates to.
 */
typedef
void (*affiliated_event_t)(void *context,
                           u8   fw_event_code,
                           void *fw_handle);

/* Callback for events not tied to a specific RDMA object. */
typedef
void (*unaffiliated_event_t)(void *context,
                             u8   event_code);

/* Event callbacks registered by the upper layer. */
struct ecore_rdma_events {
        void                    *context; /* opaque cookie passed back to both callbacks */
        affiliated_event_t      affiliated_event;
        unaffiliated_event_t    unaffiliated_event;
};
   90 
/* Device attributes and capability limits reported to the upper layer
 * (see ecore_rdma_query_device()).
 */
struct ecore_rdma_device {
        /* Vendor specific information */
        u32     vendor_id;
        u32     vendor_part_id;
        u32     hw_ver;
        u64     fw_ver;

        u64     node_guid; /* node GUID */
        u64     sys_image_guid; /* System image GUID */

        u8      max_cnq;
        u8      max_sge; /* The maximum number of scatter/gather entries
                          * per Work Request supported
                          */
        u8      max_srq_sge; /* The maximum number of scatter/gather entries
                              * per Work Request supported for SRQ
                              */
        u16     max_inline;
        u32     max_wqe; /* The maximum number of outstanding work
                          * requests on any Work Queue supported
                          */
        u32     max_srq_wqe; /* The maximum number of outstanding work
                              * requests on any Work Queue supported for SRQ
                              */
        u8      max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
                                             * & atomic operation that can be
                                             * outstanding per QP
                                             */

        u8      max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
                                            * initiation of RDMA Read
                                            * & atomic operations
                                            */
        u64     max_dev_resp_rd_atomic_resc;
        u32     max_cq;
        u32     max_qp;
        u32     max_srq; /* Maximum number of SRQs */
        u32     max_mr; /* Maximum number of MRs supported by this device */
        u64     max_mr_size; /* Size (in bytes) of the largest contiguous memory
                              * block that can be registered by this device
                              */
        u32     max_cqe;
        u32     max_mw; /* The maximum number of memory windows supported */
        u32     max_fmr;
        u32     max_mr_mw_fmr_pbl;
        u64     max_mr_mw_fmr_size;
        u32     max_pd; /* The maximum number of protection domains supported */
        u32     max_ah;
        u8      max_pkey;
        u16     max_srq_wr; /* Maximum number of WRs per SRQ */
        u8      max_stats_queues; /* Maximum number of statistics queues */
        u32     dev_caps; /* Bitmask built from the ECORE_RDMA_DEV_CAP_* fields below */

        /* Ability to support RNR-NAK generation */

#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK                         0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT                        0
        /* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK                   0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT                  1
        /* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK               0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT              2
        /* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK               0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT              3
        /* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK                       0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT                      4
        /* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK                    0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT                   5
        /* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK                       0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT                      6
        /* Ability to support CQ resizing */
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK                       0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT                      7
        /* Ability to support modifying the maximum number of
         * outstanding work requests per QP
         */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK                   0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT                  8
        /* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK                   0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT                  9
        /* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK                 0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT                10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK                  0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT                 11
        /* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK           0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT          12
        /* Ability to support block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK                      0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT                     13
        /* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK                            0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT                           14
        /* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK                 0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT                15
        /* Ability to support Loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK                    0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT                   16
        u64     page_size_caps;
        u8      dev_ack_delay;
        u32     reserved_lkey; /* Value of reserved L_key */
        u32     bad_pkey_counter; /* Bad P_key counter support indicator */
        struct ecore_rdma_events events;
};
  202 
/* Logical link state of the RDMA port. */
enum ecore_port_state {
        ECORE_RDMA_PORT_UP,
        ECORE_RDMA_PORT_DOWN,
};

/* RoCE protocol versions supported, usable as a bitmask. */
enum ecore_roce_capability {
        ECORE_ROCE_V1   = 1 << 0,
        ECORE_ROCE_V2   = 1 << 1,
};
  212 
/* Per-port attributes reported to the upper layer
 * (see ecore_rdma_query_port()).
 */
struct ecore_rdma_port {
        enum ecore_port_state port_state;
        int     link_speed;
        u64     max_msg_size;
        u8      source_gid_table_len;
        void    *source_gid_table_ptr;
        u8      pkey_table_len;
        void    *pkey_table_ptr;
        u32     pkey_bad_counter;
        enum ecore_roce_capability capability; /* supported RoCE versions */
};
  224 
/* Description of a single CNQ's Page Buffer List (PBL). */
struct ecore_rdma_cnq_params
{
        u8  num_pbl_pages; /* Number of pages in the PBL allocated
                                   * for this queue
                                   */
        u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
        ECORE_RDMA_CQ_MODE_16_BITS,
        ECORE_RDMA_CQ_MODE_32_BITS,
};
  240 
/* DCQCN (congestion control) configuration for RoCE. */
struct ecore_roce_dcqcn_params {
        u8      notification_point; /* act as a DCQCN notification point */
        u8      reaction_point;     /* act as a DCQCN reaction point */

        /* fields for notification point */
        u32     cnp_send_timeout;

        /* fields for reaction point */
        u32     rl_bc_rate;  /* Byte Counter Limit. */
        u16     rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
        u16     rl_r_ai;     /* Active increase rate */
        u16     rl_r_hai;    /* Hyper active increase rate */
        u16     dcqcn_g;     /* Alpha update gain in 1/64K resolution */
        u32     dcqcn_k_us;  /* Alpha update interval */
        u32     dcqcn_timeout_us;
};
  257 
#ifdef CONFIG_ECORE_IWARP

/* MPA RTR (Ready To Receive) negotiation types, usable as a bitmask. */
#define ECORE_MPA_RTR_TYPE_NONE         0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND    (1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE   (1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ    (1 << 2)

/* MPA (Marker PDU Aligned framing) protocol revision. */
enum ecore_mpa_rev {
        ECORE_MPA_REV1,
        ECORE_MPA_REV2,
};

/* iWARP-specific configuration passed in at rdma_start time. */
struct ecore_iwarp_params {
        u32                             rcv_wnd_size;    /* TCP receive window size */
        u16                             ooo_num_rx_bufs; /* out-of-order RX buffers */
#define ECORE_IWARP_TS_EN (1 << 0) /* TCP timestamps enabled */
#define ECORE_IWARP_DA_EN (1 << 1) /* delayed ack enabled */
        u8                              flags; /* ECORE_IWARP_*_EN bits above */
        u8                              crc_needed;
        enum ecore_mpa_rev              mpa_rev;
        u8                              mpa_rtr; /* ECORE_MPA_RTR_TYPE_* bits */
        u8                              mpa_peer2peer;
};

#endif
  283 
/* RoCE-specific configuration passed in at rdma_start time. */
struct ecore_roce_params {
        enum ecore_rdma_cq_mode         cq_mode;
        struct ecore_roce_dcqcn_params  dcqcn_params;
        u8                              ll2_handle; /* required for UD QPs */
};

/* Input to ecore_rdma_start(); describes the CNQs and per-protocol
 * configuration the upper layer wants.
 */
struct ecore_rdma_start_in_params {
        struct ecore_rdma_events        *events;
        struct ecore_rdma_cnq_params    cnq_pbl_list[128]; /* one PBL entry per possible CNQ */
        u8                              desired_cnq; /* number of CNQs requested */
        u16                             max_mtu;
        u8                              mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
        struct ecore_iwarp_params       iwarp;
#endif
        struct ecore_roce_params        roce;
};
  301 
/* Output of ecore_rdma_add_user(): the doorbell page (DPI) assigned
 * to the new user context.
 */
struct ecore_rdma_add_user_out_params {
        /* output variables (given to miniport) */
        u16     dpi;           /* doorbell page index */
        u64     dpi_addr;      /* doorbell page virtual address */
        u64     dpi_phys_addr; /* doorbell page physical address */
        u32     dpi_size;
        u16     wid_count;
};

/* Input to ecore_rdma_create_cq(). */
struct ecore_rdma_create_cq_in_params {
        /* input variables (given by miniport) */
        u32     cq_handle_lo; /* CQ handle to be written in CNQ */
        u32     cq_handle_hi;
        u32     cq_size;
        u16     dpi;
        bool    pbl_two_level;
        u64     pbl_ptr;
        u16     pbl_num_pages;
        u8      pbl_page_size_log; /* for the pages that contain the
                           * pointers to the CQ pages
                           */
        u8      cnq_id; /* CNQ that will receive this CQ's notifications */
        u16     int_timeout;
};
  327 
/* Input to ecore_rdma_resize_cq(): the new size and PBL of an existing CQ. */
struct ecore_rdma_resize_cq_in_params {
        /* input variables (given by miniport) */

        u16     icid; /* internal CID of the CQ being resized */
        u32     cq_size;
        bool    pbl_two_level;
        u64     pbl_ptr;
        u16     pbl_num_pages;
        u8      pbl_page_size_log; /* for the pages that contain the
                       * pointers to the CQ pages
                       */
};

/* RoCE flavor / IP version carried on the wire. */
enum roce_mode
{
        ROCE_V1,
        ROCE_V2_IPV4,
        ROCE_V2_IPV6,
        MAX_ROCE_MODE
};
  348 
/* Input to ecore_rdma_create_qp(). */
struct ecore_rdma_create_qp_in_params {
        /* input variables (given by miniport) */
        u32     qp_handle_lo; /* QP handle to be written in CQE */
        u32     qp_handle_hi;
        u32     qp_handle_async_lo; /* QP handle to be written in async event */
        u32     qp_handle_async_hi;
        bool    use_srq;
        bool    signal_all;
        bool    fmr_and_reserved_lkey;
        u16     pd;  /* protection domain */
        u16     dpi; /* doorbell page index */
        u16     sq_cq_id;
        u16     sq_num_pages;
        u64     sq_pbl_ptr;     /* Not relevant for iWARP */
        u8      max_sq_sges;
        u16     rq_cq_id;
        u16     rq_num_pages;
        u64     rq_pbl_ptr;     /* Not relevant for iWARP */
        u16     srq_id;
        u8      stats_queue;
};

/* Output of ecore_rdma_create_qp() / ecore_roce_create_ud_qp(). */
struct ecore_rdma_create_qp_out_params {
        /* output variables (given to miniport) */
        u32             qp_id;
        u16             icid; /* internal CID of the QP */
        void            *rq_pbl_virt;
        dma_addr_t      rq_pbl_phys;
        void            *sq_pbl_virt;
        dma_addr_t      sq_pbl_phys;
};
  380 
/* Input to ecore_rdma_destroy_cq(). */
struct ecore_rdma_destroy_cq_in_params {
        /* input variables (given by miniport) */
        u16 icid; /* internal CID of the CQ to destroy */
};

/* Output of ecore_rdma_destroy_cq(). */
struct ecore_rdma_destroy_cq_out_params {
        /* output variables, provided to the upper layer */

        /* Sequence number of completion notification sent for the CQ on
         * the associated CNQ
         */
        u16     num_cq_notif;
};

/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
union ecore_gid {
        u8 bytes[16];
        u16 words[8];
        u32 dwords[4];
        u64 qwords[2];
        u32 ipv4_addr; /* RoCE v2 over IPv4 uses only this field */
};
  403 
/* Input to ecore_rdma_modify_qp(). Only the attributes whose
 * corresponding VALID bit is set in modify_flags are applied.
 */
struct ecore_rdma_modify_qp_in_params {
        /* input variables (given by miniport) */
        u32             modify_flags; /* bitmask of the VALID flags below */
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

        enum ecore_roce_qp_state        new_state;
        u16             pkey;
        bool            incoming_rdma_read_en;
        bool            incoming_rdma_write_en;
        bool            incoming_atomic_en;
        bool            e2e_flow_control_en;
        u32             dest_qp;
        u16             mtu;
        u8              traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
        u8              hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
        u32             flow_label; /* ignored in IPv4 */
        union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
        union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
        u16             udp_src_port; /* RoCEv2 only */

        u16             vlan_id;

        u32             rq_psn;
        u32             sq_psn;
        u8              max_rd_atomic_resp;
        u8              max_rd_atomic_req;
        u32             ack_timeout;
        u8              retry_cnt;
        u8              rnr_retry_cnt;
        u8              min_rnr_nak_timer;
        bool            sqd_async;
        u8              remote_mac_addr[6];
        u8              local_mac_addr[6];
        bool            use_local_mac;
        enum roce_mode  roce_mode;
};
  469 
/* Output of ecore_rdma_query_qp(): current attributes of a QP. */
struct ecore_rdma_query_qp_out_params {
        /* output variables (given to miniport) */
        enum ecore_roce_qp_state        state;
        u32             rq_psn; /* responder */
        u32             sq_psn; /* requester */
        bool            draining; /* send queue is draining */
        u16             mtu;
        u32             dest_qp;
        bool            incoming_rdma_read_en;
        bool            incoming_rdma_write_en;
        bool            incoming_atomic_en;
        bool            e2e_flow_control_en;
        union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
        union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
        u32             flow_label; /* ignored in IPv4 */
        u8              hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
        u8              traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
        u32             timeout;
        u8              rnr_retry;
        u8              retry_cnt;
        u8              min_rnr_nak_timer;
        u16             pkey_index;
        u8              max_rd_atomic;
        u8              max_dest_rd_atomic;
        bool            sqd_async;
};
  496 
/* Input to ecore_rdma_register_tid(): describes the MR/MW being
 * registered, its access rights and its physical buffer list.
 */
struct ecore_rdma_register_tid_in_params {
        /* input variables (given by miniport) */
        u32     itid; /* index only, 18 bit long, lkey = itid << 8 | key */
        enum ecore_rdma_tid_type tid_type;
        u8      key; /* low byte of the lkey (see itid comment above) */
        u16     pd;  /* protection domain */
        bool    local_read;
        bool    local_write;
        bool    remote_read;
        bool    remote_write;
        bool    remote_atomic;
        bool    mw_bind;
        u64     pbl_ptr;
        bool    pbl_two_level;
        u8      pbl_page_size_log; /* for the pages that contain the pointers
                       * to the MR pages
                       */
        u8      page_size_log; /* for the MR pages */
        u32     fbo; /* first byte offset into the first page */
        u64     length; /* only lower 40 bits are valid */
        u64     vaddr;
        bool    zbva;   /* zero based virtual address */
        bool    phy_mr;
        bool    dma_mr;

        /* DIF related fields */
        bool    dif_enabled;
        u64     dif_error_addr;
        u64     dif_runt_addr;
};
  527 
/* Input to SRQ creation. */
struct ecore_rdma_create_srq_in_params  {
        u64 pbl_base_addr;
        u64 prod_pair_addr;
        u16 num_pages;
        u16 pd_id; /* protection domain */
        u16 page_size;
};

/* Output of SRQ creation. */
struct ecore_rdma_create_srq_out_params {
        u16 srq_id;
};

/* Input to SRQ destruction. */
struct ecore_rdma_destroy_srq_in_params {
        u16 srq_id;
};

/* Input to SRQ modification. */
struct ecore_rdma_modify_srq_in_params {
        u32 wqe_limit;
        u16 srq_id;
};

/* Output of ecore_rdma_resize_cq(). */
struct ecore_rdma_resize_cq_out_params {
        /* output variables, provided to the upper layer */
        u32 prod; /* CQ producer value on old PBL */
        u32 cons; /* CQ consumer value on old PBL */
};
  554 
/* Input to ecore_rdma_resize_cnq(). */
struct ecore_rdma_resize_cnq_in_params {
        /* input variables (given by miniport) */
        u32     cnq_id;
        u32     pbl_page_size_log; /* for the pages that contain the
                        * pointers to the cnq pages
                        */
        u64     pbl_ptr;
};

/* Output of ecore_rdma_query_stats(): traffic counters for one
 * statistics queue.
 */
struct ecore_rdma_stats_out_params {
        u64     sent_bytes;
        u64     sent_pkts;
        u64     rcv_bytes;
        u64     rcv_pkts;

        /* RoCE only */
        u64     icrc_errors;            /* wraps at 32 bits */
        u64     retransmit_events;      /* wraps at 32 bits */
        u64     silent_drops;           /* wraps at 16 bits */
        u64     rnr_nacks_sent;         /* wraps at 16 bits */

        /* iWARP only */
        u64     iwarp_tx_fast_rxmit_cnt;
        u64     iwarp_tx_slow_start_cnt;
        u64     unalign_rx_comp;
};

/* Output of ecore_rdma_query_counters(): current resource usage
 * against the device maximums.
 */
struct ecore_rdma_counters_out_params {
        u64     pd_count;
        u64     max_pd;
        u64     dpi_count;
        u64     max_dpi;
        u64     cq_count;
        u64     max_cq;
        u64     qp_count;
        u64     max_qp;
        u64     tid_count;
        u64     max_tid;
};
  594 
/* Register a new user context; returns its doorbell page in out_params. */
enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
                    struct ecore_rdma_add_user_out_params *out_params);

/* Allocate a protection domain; its id is returned through *pd. */
enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
                    u16 *pd);

/* Allocate a TID for MR/MW registration; returned through *tid. */
enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
                     u32 *tid);

/* Create a completion queue; its internal CID is returned through *icid. */
enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
                     struct ecore_rdma_create_cq_in_params *params,
                     u16 *icid);

/* Returns a pointer to the responders' CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
                     struct ecore_rdma_create_qp_in_params  *in_params,
                     struct ecore_rdma_create_qp_out_params *out_params);

/* Create an Unreliable Datagram QP (RoCE only). */
enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
                        struct ecore_rdma_create_qp_out_params *out_params);

/* Deregister the MR/MW previously registered on this TID. */
enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
                          u32           tid);

/* Destroy a completion queue created by ecore_rdma_create_cq(). */
enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
                      struct ecore_rdma_destroy_cq_in_params  *in_params,
                      struct ecore_rdma_destroy_cq_out_params *out_params);

/* Destroy a QP created by ecore_rdma_create_qp(). */
enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
                      struct ecore_rdma_qp *qp);

/* Destroy a UD QP created by ecore_roce_create_ud_qp(). */
enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

/* Free a protection domain allocated by ecore_rdma_alloc_pd(). */
void
ecore_rdma_free_pd(void *rdma_cxt,
                   u16  pd);

/* Free a TID allocated by ecore_rdma_alloc_tid(). */
void
ecore_rdma_free_tid(void *rdma_cxt,
                    u32 tid);

/* Apply the attributes flagged valid in params to the given QP. */
enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
                     struct ecore_rdma_qp *qp,
                     struct ecore_rdma_modify_qp_in_params *params);

/* Query device attributes and capability limits. */
struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

/* Query per-port attributes (link state, GID/P_Key tables, ...). */
struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

/* Query the current attributes of a QP. */
enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
                    struct ecore_rdma_qp                  *qp,
                    struct ecore_rdma_query_qp_out_params *out_params);

/* Register an MR/MW on a previously allocated TID. */
enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
                        struct ecore_rdma_register_tid_in_params *params);

/* Remove a user context and release its doorbell page. */
void ecore_rdma_remove_user(void *rdma_cxt,
                            u16         dpi);

/* Resize a CNQ (give it a new PBL). */
enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
                      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Resize an existing CQ; old-PBL producer/consumer values are
 * returned in out_params.
 */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
                     struct ecore_rdma_resize_cq_in_params  *in_params,
                     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start upper layer (VBD/qed) should fill the
 * page-size and mtu in hwfn context
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
                 struct ecore_rdma_start_in_params *params);

/* Stop the RDMA engine started by ecore_rdma_start(). */
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

/* Read traffic statistics for one statistics queue. */
enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
                       struct ecore_rdma_stats_out_params *out_parms);

/* Read current resource-usage counters. */
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
                          struct ecore_rdma_counters_out_params *out_parms);

/* Map a relative status-block id to an absolute one. */
u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

/* Query the CAU (coalescing) timer resolution. */
u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);

/* Ring the CNQ producer doorbell after consuming CNQ elements. */
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

/* Free RDMA resources held by the hw-function. */
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
  706 
  707 #ifdef CONFIG_ECORE_IWARP
  708 
  709 /* iWARP API */
  710 
/* Asynchronous iWARP events delivered to the upper layer through the
 * iwarp_event_handler callback (see ecore_iwarp_cm_event_params.event).
 * Exact firmware triggers for the error events are defined in the
 * implementation, not here.
 */
enum ecore_iwarp_event_type {
        ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
        ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
                                             * ( ack on mpa response )
                                             */
        ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
        ECORE_IWARP_EVENT_DISCONNECT,
        ECORE_IWARP_EVENT_CLOSE,
        ECORE_IWARP_EVENT_IRQ_FULL,      /* inbound read queue full */
        ECORE_IWARP_EVENT_RQ_EMPTY,
        ECORE_IWARP_EVENT_LLP_TIMEOUT,   /* lower-layer (TCP) timeout */
        ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
        ECORE_IWARP_EVENT_CQ_OVERFLOW,
        ECORE_IWARP_EVENT_QP_CATASTROPHIC,
        ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
        ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
        ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
        ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};
  730 
/* IP version selector for offloaded TCP connections. */
enum ecore_tcp_ip_version
{
        ECORE_TCP_IPV4,
        ECORE_TCP_IPV6,
};
  736 
/* Connection-manager description of a single iWARP connection.
 * Address arrays hold 4 dwords to cover IPv6; for ECORE_TCP_IPV4 presumably
 * only part of the array is meaningful — confirm against the implementation.
 */
struct ecore_iwarp_cm_info {
        enum ecore_tcp_ip_version ip_version;
        u32 remote_ip[4];
        u32 local_ip[4];
        u16 remote_port;
        u16 local_port;
        u16 vlan;
        const void *private_data;  /* MPA private data */
        u16 private_data_len;
        u8 ord;                    /* RDMA read depths negotiated via MPA */
        u8 ird;
};
  749 
/* Parameters handed to the upper layer's iwarp_event_handler callback. */
struct ecore_iwarp_cm_event_params {
        enum ecore_iwarp_event_type event;       /* which event occurred */
        const struct ecore_iwarp_cm_info *cm_info; /* connection identity */
        void *ep_context; /* To be passed to accept call */
        int status;
};
  756 
/* Upper-layer callback invoked for iWARP connection-manager events.
 * @context is the cb_context registered in the connect/listen/accept calls.
 */
typedef int (*iwarp_event_handler)(void *context,
                                   struct ecore_iwarp_cm_event_params *event);
  759 
/* Active Side Connect Flow:
 * upper layer driver calls ecore_iwarp_connect
 * Function is blocking: i.e. returns after tcp connection is established
 * After MPA connection is established ECORE_IWARP_EVENT_ACTIVE_COMPLETE event
 * will be passed to upperlayer driver using the event_cb passed in
 * ecore_iwarp_connect_in. Information of the established connection will be
 * initialized in event data.
 */
struct ecore_iwarp_connect_in {
        iwarp_event_handler event_cb;   /* delivers connection events */
        void *cb_context;               /* passed back to event_cb */
        struct ecore_rdma_qp *qp;       /* QP to bind to this connection */
        struct ecore_iwarp_cm_info cm_info; /* 4-tuple, vlan, MPA params */
        u16 mss;
        u8 remote_mac_addr[6];
        u8 local_mac_addr[6];
};
  777 
/* Output of ecore_iwarp_connect(). */
struct ecore_iwarp_connect_out {
        void *ep_context; /* endpoint handle for subsequent API calls */
};
  781 
/* Passive side connect flow:
 * upper layer driver calls ecore_iwarp_create_listen
 * once Syn packet that matches a ip/port that is listened on arrives, ecore
 * will offload the tcp connection. After MPA Request is received on the
 * offload connection, the event ECORE_IWARP_EVENT_MPA_REQUEST will be sent
 * to upper layer driver using the event_cb passed below. The event data
 * will be placed in event parameter. After upper layer driver processes the
 * event, ecore_iwarp_accept or ecore_iwarp_reject should be called to continue
 * MPA negotiation. Once negotiation is complete the event
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE will be passed to the event_cb passed
 * originally in ecore_iwarp_listen_in structure.
 */
struct ecore_iwarp_listen_in {
        iwarp_event_handler event_cb; /* Callback func for delivering events */
        void *cb_context; /* passed to event_cb */
        u32 max_backlog; /* Max num of pending incoming connection requests */
        enum ecore_tcp_ip_version ip_version; /* selects ip_addr format */
        u32 ip_addr[4];  /* local address to listen on (4 dwords for IPv6) */
        u16 port;        /* local TCP port to listen on */
        u16 vlan;
};
  803 
/* Output of ecore_iwarp_create_listen(). */
struct ecore_iwarp_listen_out {
        void *handle; /* to be sent to destroy */
};
  807 
/* Input to ecore_iwarp_accept(): accept a received MPA request. */
struct ecore_iwarp_accept_in {
        void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
        void *cb_context; /* context to be passed to event_cb */
        struct ecore_rdma_qp *qp;   /* QP to bind to the accepted connection */
        const void *private_data;   /* MPA private data for the response */
        u16 private_data_len;
        u8 ord;                     /* RDMA read depths for MPA negotiation */
        u8 ird;
};
  817 
/* Input to ecore_iwarp_reject(): reject a received MPA request. */
struct ecore_iwarp_reject_in {
        void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
        void *cb_context; /* context to be passed to event_cb */
        const void *private_data;  /* MPA private data for the reject reply */
        u16 private_data_len;
};
  824 
/* Input to ecore_iwarp_send_rtr(). */
struct ecore_iwarp_send_rtr_in {
        void *ep_context; /* endpoint handle of the connection */
};
  828 
/* Input to ecore_iwarp_tcp_abort(). */
struct ecore_iwarp_tcp_abort_in {
        void *ep_context; /* endpoint handle of the connection to abort */
};
  832 
/* Active-side connect; blocks until the TCP connection is established
 * (see the flow comment above ecore_iwarp_connect_in).
 * All functions below return ECORE_SUCCESS or an error status, and take
 * the per-hwfn rdma context as their first argument.
 */
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
                    struct ecore_iwarp_connect_in *iparams,
                    struct ecore_iwarp_connect_out *oparams);

/* Start listening for incoming iWARP connections on an ip/port. */
enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
                          struct ecore_iwarp_listen_in *iparams,
                          struct ecore_iwarp_listen_out *oparams);

/* Accept an MPA request previously delivered via event_cb. */
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
                   struct ecore_iwarp_accept_in *iparams);

/* Reject an MPA request previously delivered via event_cb. */
enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
                   struct ecore_iwarp_reject_in *iparams);

/* Tear down a listener; @handle comes from ecore_iwarp_listen_out. */
enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

/* Send an MPA ready-to-receive indication on the given endpoint. */
enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

/* Abort the underlying TCP connection of the given endpoint. */
enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);
  859 
  860 #endif /* CONFIG_ECORE_IWARP */
  861 
  862 #endif

Cache object: ff8b8387b492c9b96acadcb85b7be91c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.