FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <netinet/toecore.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/uverbs_ioctl.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"

#define DRV_NAME "iw_cxgbe"
#define MOD DRV_NAME ":"
#define KTR_IW_CXGBE    KTR_SPARE3

extern int c4iw_debug;
extern int use_dsgl;
extern int inline_threshold;

#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printf(MOD fmt, ## args); \
} while (0)
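
/*
 * Usage note (illustrative, not part of the original header): PDBG prints
 * only when the c4iw_debug module variable is nonzero, and every message
 * is prefixed with MOD ("iw_cxgbe:").  A typical call looks like:
 *
 *      PDBG("%s: qpid %u stag 0x%x\n", __func__, qpid, stag);
 */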

#include "t4.h"

static inline void *cplhdr(struct mbuf *m)
{
        return mtod(m, void *);
}

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the ids returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */
#define C4IW_MAX_PAGE_SIZE 0x8000000

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1 << 0),
        T4_STATUS_PAGE_DISABLED = (1 << 1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct c4iw_rdev {
        struct adapter *adap;
        struct c4iw_resource resource;
        unsigned long qpshift;
        u32 qpmask;
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        vmem_t          *rqt_arena;
        vmem_t          *pbl_arena;
        u32 flags;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned int bar2_len;
        struct workqueue_struct *free_workq;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->adap->vres.stag.size >> 5);
}

static inline int t4_max_fr_depth(struct c4iw_rdev *rdev, bool use_dsgl)
{
        if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl)
                return rdev->adap->params.dev_512sgl_mr ?
                    T4_MAX_FR_FW_DSGL_DEPTH : T4_MAX_FR_DSGL_DEPTH;
        else
                return T4_MAX_FR_IMMD_DEPTH;
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
        int ret;
        struct completion completion;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}

static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
                u32 hwtid, u32 qpid, struct socket *so, const char *func)
{
        struct adapter *sc = rdev->adap;
        unsigned to = C4IW_WR_TO;
        int ret;
        int timedout = 0;
        struct timeval t1, t2;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        getmicrotime(&t1);
        do {
                /*
                 * If waiting for a reply in the rdma_init()/rdma_fini()
                 * threads, then check if there are any connection errors.
                 */
                if (so && so->so_error) {
                        wr_waitp->ret = -ECONNRESET;
                        CTR5(KTR_IW_CXGBE, "%s - Connection ERROR %u for sock %p "
                            "tid %u qpid %u", func,
                            so->so_error, so, hwtid, qpid);
                        break;
                }

                ret = wait_for_completion_timeout(&wr_waitp->completion, to);
                if (!ret) {
                        getmicrotime(&t2);
                        timevalsub(&t2, &t1);
                        printf("%s - Device %s not responding after %ld.%06ld "
                            "seconds - tid %u qpid %u\n", func,
                            device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
                            hwtid, qpid);
                        if (c4iw_fatal_error(rdev)) {
                                wr_waitp->ret = -EIO;
                                break;
                        }
                        to = to << 2;
                        timedout = 1;
                }
        } while (!ret);

out:
        if (timedout) {
                getmicrotime(&t2);
                timevalsub(&t2, &t1);
                printf("%s - Device %s reply after %ld.%06ld seconds - "
                    "tid %u qpid %u\n", func, device_get_nameunit(sc->dev),
                    t2.tv_sec, t2.tv_usec, hwtid, qpid);
        }
        if (wr_waitp->ret)
                CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc,
                    wr_waitp->ret, hwtid, qpid);
        return (wr_waitp->ret);
}
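
/*
 * Example (a hypothetical caller, for illustration): the wr_wait pattern
 * pairs a synchronous waiter with the asynchronous firmware reply path.
 * The poster initializes the wait object, sends a work request whose
 * cookie refers to it, and blocks; the CPL reply handler later calls
 * c4iw_wake_up() with the firmware status.
 *
 *      struct c4iw_wr_wait wr_wait;
 *      int ret;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ret = c4iw_ofld_send(rdev, m);
 *      if (ret == 0)
 *              ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
 *                  so, __func__);
 */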

struct c4iw_dev {
        struct ib_device ibdev;
        struct pci_dev pdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct dentry *debugfs_root;
        u32 avail_ird;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;
        int newid;

        do {
                if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
                        return -ENOMEM;
                if (lock)
                        spin_lock_irq(&rhp->lock);
                ret = idr_get_new_above(idr, handle, id, &newid);
                BUG_ON(!ret && newid != id);
                if (lock)
                        spin_unlock_irq(&rhp->lock);
        } while (ret == -EAGAIN);

        return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                  u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                        struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}
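
/*
 * Example (illustrative): the idr wrappers above map a hardware id (cqid,
 * qpid or mmid) to its software object, so event and CPL handlers can
 * recover the object from the id carried in a message.  A typical
 * lifecycle, with names borrowed from the QP path:
 *
 *      ret = insert_handle(rhp, &rhp->qpidr, qhp, qpid);
 *      ...
 *      qhp = get_qhp(rhp, qpid);       (e.g. in an async event handler)
 *      ...
 *      remove_handle(rhp, &rhp->qpidr, qpid);
 */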

extern int c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.adap->params.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_ib_srq {
        struct ib_srq ibsrq;
};

struct c4iw_ib_ah {
        struct ib_ah ibah;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        struct kref kref;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
        struct work_struct free_work;
        struct c4iw_ucontext *ucontext;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
                             __func__, key, (unsigned long long) mm->addr,
                             mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
            (unsigned long long) mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
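
/*
 * Example (illustrative; the field names sketch the create-QP path): the
 * mmap list hands queue and doorbell addresses to userspace.  The verbs
 * code allocates a key, records the physical address and length, and
 * returns the key to the user library, which mmap()s it; the driver's
 * mmap handler then calls remove_mmap() to claim the entry.
 *
 *      mm->key = uresp.sq_key;
 *      mm->addr = qhp->wq.sq.phys_addr;
 *      mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
 *      insert_mmap(ucontext, mm);
 */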

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1 << 1,
        C4IW_QP_ATTR_RQ_DB = 1 << 2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs, int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

/*
 * IW_CXGBE event bits.
 * These bits are used for handling all events for a particular 'ep' serially.
 */
#define C4IW_EVENT_SOCKET       0x0001
#define C4IW_EVENT_TIMEOUT      0x0002
#define C4IW_EVENT_TERM         0x0004

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}
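
/*
 * Note: the two conversions above are not exact inverses.  Both
 * IB_QPS_RESET and IB_QPS_INIT collapse to C4IW_QP_STATE_IDLE, and
 * to_ib_qp_state() reports IDLE back as IB_QPS_INIT.
 */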

#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
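
/*
 * Example (illustrative): a memory region registered for local write plus
 * remote read/write translates as
 *
 *      c4iw_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE |
 *          IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE)
 *
 * which yields FW_RI_MEM_ACCESS_REM_WRITE | FW_RI_MEM_ACCESS_REM_READ |
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_LOCAL_READ; local read
 * permission is always granted.
 */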

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_ENHANCED_RDMA_CONN  0x10
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

#define c4iw_put_ep(ep) { \
        CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
             __func__, __LINE__, ep, kref_read(&(ep)->kref)); \
        WARN_ON(kref_read(&(ep)->kref) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
             __func__, __LINE__, ep, kref_read(&(ep)->kref)); \
        kref_get(&((ep)->kref)); \
}

void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03,
        MPA_LOCAL_CATA          = 0x05,
        MPA_INSUFF_IRD          = 0x06,
        MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
        STOP_MPA_TIMER          = 7,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ            = 0,
        ACT_OFLD_CONN           = 1,
        ACT_OPEN_RPL            = 2,
        ACT_ESTAB               = 3,
        PASS_ACCEPT_REQ         = 4,
        PASS_ESTAB              = 5,
        ABORT_UPCALL            = 6,
        ESTAB_UPCALL            = 7,
        CLOSE_UPCALL            = 8,
        ULP_ACCEPT              = 9,
        ULP_REJECT              = 10,
        TIMEDOUT                = 11,
        PEER_ABORT              = 12,
        PEER_CLOSE              = 13,
        CONNREQ_UPCALL          = 14,
        ABORT_CONN              = 15,
        DISCONN_UPCALL          = 16,
        EP_DISC_CLOSE           = 17,
        EP_DISC_ABORT           = 18,
        CONN_RPL_UPCALL         = 19,
        ACT_RETRY_NOMEM         = 20,
        ACT_RETRY_INUSE         = 21,
        CLOSE_CON_RPL           = 22,
        EP_DISC_FAIL            = 24,
        QP_REFED                = 25,
        QP_DEREFED              = 26,
        CM_ID_REFED             = 27,
        CM_ID_DEREFED           = 28
};

struct c4iw_ep_common {
        TAILQ_ENTRY(c4iw_ep_common) entry;      /* Work queue attachment */
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
        int rpl_err;
        int rpl_done;
        struct thread *thread;
        struct socket *so;
        int ep_events;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
        struct list_head listen_ep_list;  /* list of all listener eps bound
                                             to one port address */
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_listen_ep *parent_ep;
        struct timer_list timer;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
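
/*
 * Worked example: for a 1 MB receive window (win = 1048576), the loop
 * shifts until 65535 << wscale covers the window; 65535 << 4 = 1048560
 * is still short, so compute_wscale(1048576) returns 5.  The shift is
 * capped at 14, the largest value TCP window scaling (RFC 7323) allows.
 */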

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);
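
/*
 * Example (illustrative): these routines back a bitmap-based id
 * allocator; the pattern for, say, PD ids would be
 *
 *      c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid, 0,
 *          C4IW_ID_TABLE_F_RANDOM);
 *      pdid = c4iw_id_alloc(&rdev->resource.pdid_table);
 *      ...
 *      c4iw_id_free(&rdev->resource.pdid_table, pdid);
 */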

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                   const struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                u32 max_num_sg, struct ib_udata *udata);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                int sg_nents, unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                u64 virt, int acc, struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int c4iw_create_cq(struct ib_cq *ibcq,
                   const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
void c4iw_flush_hw_cq(struct c4iw_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
#endif
