FreeBSD/Linux Kernel Cross Reference
sys/dev/mthca/mthca_srq.c

/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/uverbs_ioctl.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};

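/*
 * Return the address of WQE number n.  The queue is either a single
 * direct buffer or a list of pages, so the offset is computed
 * accordingly.
 */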
static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

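/*
 * Fill in the hardware SRQ context for Tavor-family HCAs: descriptor
 * size, PD, LKey and the UAR used for doorbells (the user's UAR for
 * userspace SRQs, the driver's UAR otherwise).
 */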
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context,
                                         struct ib_udata *udata)
{
        struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);

        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (udata)
                context->uar = cpu_to_be32(ucontext->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

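/*
 * Fill in the hardware SRQ context for Arbel (mem-free) HCAs: log2
 * queue size and SRQ number, LKey, doorbell record index, log2 WQE
 * stride, UAR page and EQ/PD.
 */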
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context,
                                         struct ib_udata *udata)
{
        struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
        int logsize;

        memset(context, 0, sizeof *context);
        logsize = ilog2(srq->max);
        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (udata)
                context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

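/* Free a kernel SRQ's buffer and its work request ID array. */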
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

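/*
 * Allocate the buffer and wrid array for a kernel SRQ and link all
 * WQEs into the free list.  Userspace SRQs provide their own buffer,
 * so there is nothing to do for them here.
 */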
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq, struct ib_udata *udata)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (udata)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }

        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentinel value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                struct mthca_next_seg *next;

                next = wqe = get_wqe(srq, i);

                if (i < srq->max - 1) {
                        *wqe_to_link(wqe) = i + 1;
                        next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
                } else {
                        *wqe_to_link(wqe) = -1;
                        next->nda_op = 0;
                }

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}

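/*
 * Create an SRQ: validate the requested size, allocate an SRQ number
 * and, for mem-free HCAs, ICM and doorbell resources, build the
 * buffer and free list, pass ownership to the hardware with
 * SW2HW_SRQ, and publish the SRQ in the srq_table array.
 */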
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq,
                    struct ib_udata *udata)
{
        struct mthca_mailbox *mailbox;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;

        srq->max      = attr->max_wr;
        srq->max_gs   = attr->max_sge;
        srq->counter  = 0;

        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);
        else
                srq->max = srq->max + 1;

        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));

        if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
                return -EINVAL;

        srq->wqe_shift = ilog2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!udata) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq, udata);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        srq->refcount = 1;
        init_waitqueue_head(&srq->wait);
        mutex_init(&srq->mutex);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                err = -ENOMEM;
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = srq->max - 1;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        /*
         * Don't clobber err here: report a HW2SW_SRQ failure but
         * return the original error to the caller.
         */
        if (mthca_HW2SW_SRQ(dev, mailbox, srq->srqn))
                mthca_warn(dev, "HW2SW_SRQ failed\n");

err_out_free_buf:
        if (!udata)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!udata && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}

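/* Read the SRQ reference count under the SRQ table lock. */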
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
        int c;

        spin_lock_irq(&dev->srq_table.lock);
        c = srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        return c;
}

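/*
 * Destroy an SRQ: take ownership back from the hardware with
 * HW2SW_SRQ, remove the SRQ from the lookup array, wait for the
 * reference count to drop to zero, and release the buffer, doorbell,
 * ICM and SRQ number.
 */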
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        --srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        wait_event(srq->wait, !get_srq_refcount(dev, srq));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

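/*
 * Modify an SRQ.  Resizing (IB_SRQ_MAX_WR) is not supported; arming
 * the SRQ limit (IB_SRQ_LIMIT) is handed to the hardware via ARM_SRQ.
 */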
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret = 0;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
                if (attr->srq_limit > max_wr)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
                mutex_unlock(&srq->mutex);
        }

        return ret;
}

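/*
 * Query an SRQ: read the context back from the hardware to get the
 * current limit watermark; max_wr and max_sge come from driver state.
 */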
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        struct mthca_tavor_srq_context *tavor_ctx;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else {
                tavor_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
        }

        srq_attr->max_wr  = srq->max - 1;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}

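/*
 * Handle an asynchronous event on an SRQ: look the SRQ up, take a
 * reference so it cannot be freed while the consumer's event handler
 * runs, and dispatch the event.
 */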
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                ++srq->refcount;
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        spin_lock(&dev->srq_table.lock);
        if (!--srq->refcount)
                wake_up(&srq->wait);
        spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;
        struct mthca_next_seg *last_free;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        last_free = get_wqe(srq, srq->last_free);
        *wqe_to_link(last_free) = ind;
        last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}

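/*
 * Post a list of receive work requests to a Tavor SRQ.  WQEs are
 * taken from the free list and chained together; the receive
 * doorbell is rung in batches of at most
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests.
 */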
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                              const struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int first_ind;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&srq->lock, flags);

        first_ind = srq->first_free;

        for (nreq = 0; wr; wr = wr->next) {
                ind       = srq->first_free;
                wqe       = get_wqe(srq, ind);
                next_ind  = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                prev_wqe  = srq->last;
                srq->last = wqe;

                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;

                ++nreq;
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        /*
                         * Make sure that descriptors are written
                         * before doorbell is rung.
                         */
                        wmb();

                        mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        first_ind = srq->first_free;
                }
        }

        if (likely(nreq)) {
                /*
                 * Make sure that descriptors are written before
                 * doorbell is rung.
                 */
                wmb();

                mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        /*
         * Make sure doorbells don't leak out of SRQ spinlock and
         * reach the HCA out of order:
         */
        mmiowb();

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

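/*
 * Post a list of receive work requests to an Arbel (mem-free) SRQ.
 * No doorbell register is used here: after the descriptors are
 * written, the WQE counter in the doorbell record is updated.
 */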
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                              const struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ind       = srq->first_free;
                wqe       = get_wqe(srq, ind);
                next_ind  = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                srq->counter += nreq;

                /*
                 * Make sure that descriptors are written before
                 * we write doorbell record.
                 */
                wmb();
                *srq->db = cpu_to_be32(srq->counter);
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}

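/* Maximum number of scatter/gather entries per SRQ WQE. */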
int mthca_max_srq_sge(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev))
                return dev->limits.max_sg;

        /*
         * SRQ allocations are based on powers of 2 for Tavor
         * (although they only need to be multiples of 16 bytes).
         *
         * Therefore, we need to base the max number of sg entries on
         * the largest power of 2 descriptor size that is <= the
         * actual max WQE descriptor size, rather than return the
         * max_sg value given by the firmware (which is based on WQE
         * sizes as multiples of 16, not powers of 2).
         *
         * If the SRQ implementation is changed for Tavor to be based
         * on multiples of 16, the calculation below can be deleted
         * and the FW max_sg value returned.
         */
        return min_t(int, dev->limits.max_sg,
                     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
                      sizeof (struct mthca_next_seg)) /
                     sizeof (struct mthca_data_seg));
}

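/* Set up the SRQ number allocator and the SRQN-to-SRQ lookup array. */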
int mthca_init_srq_table(struct mthca_dev *dev)
{
        int err;

        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return 0;

        spin_lock_init(&dev->srq_table.lock);

        err = mthca_alloc_init(&dev->srq_table.alloc,
                               dev->limits.num_srqs,
                               dev->limits.num_srqs - 1,
                               dev->limits.reserved_srqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->srq_table.srq,
                               dev->limits.num_srqs);
        if (err)
                mthca_alloc_cleanup(&dev->srq_table.alloc);

        return err;
}

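/* Tear down the SRQ lookup array and the SRQ number allocator. */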
void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return;

        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
}
