FreeBSD/Linux Kernel Cross Reference
sys/dev/iavf/iavf_adminq.c

/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        hw->aq.asq.tail = IAVF_VF_ATQT1;
        hw->aq.asq.head = IAVF_VF_ATQH1;
        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
        hw->aq.arq.tail = IAVF_VF_ARQT1;
        hw->aq.arq.head = IAVF_VF_ARQH1;
        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}
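
/*
 * Sizing sketch (illustrative, not part of the driver): both allocations
 * above scale linearly with the ring depth.  Assuming the usual 32-byte
 * struct iavf_aq_desc of this hardware family, a 32-entry send queue needs
 * a single 1 KiB DMA region:
 *
 *      ring_bytes = hw->aq.num_asq_entries * sizeof(struct iavf_aq_desc)
 *                 = 32 * 32 = 1024
 *
 * The command-details array is plain (non-DMA) memory because only the
 * driver, never the device, reads it.
 */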

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
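
/*
 * Address-split sketch (hypothetical helper, not part of this file): each
 * descriptor carries a buffer's 64-bit bus address as two little-endian
 * 32-bit halves.  Assuming the conventional definitions of the DWORD
 * macros, the writes above reduce to:
 */
#if 0
static void iavf_example_set_buf_addr(struct iavf_aq_desc *desc, u64 pa)
{
        /* IAVF_HI_DWORD: bits 63..32; IAVF_LO_DWORD: bits 31..0 */
        desc->params.external.addr_high = CPU_TO_LE32((u32)(pa >> 32));
        desc->params.external.addr_low = CPU_TO_LE32((u32)(pa & 0xffffffffULL));
}
#endif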

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}
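
/*
 * Encoding note (illustrative): the length register carries the ring depth
 * in its low bits with the enable flag OR'ed into the same word, so one
 * write both sizes and enables the queue, e.g. for a 32-entry ASQ:
 *
 *      wr32(hw, hw->aq.asq.len, 32 | IAVF_VF_ATQLEN1_ATQENABLE_MASK);
 *
 * The read-back of the base-address-low register is a cheap check that the
 * device (and the register mapping) actually accepted the programming.
 */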

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}
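
/*
 * Posting note (illustrative): writing tail to num_arq_entries - 1 hands
 * every pre-allocated receive buffer except one to the hardware.  A ring
 * with head == tail is indistinguishable from an empty one, so one slot is
 * conventionally kept back; with a 32-entry ARQ the initial state is
 * head = 0, tail = 31.
 */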

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);
        return ret_code;

init_config_regs:
        iavf_free_asq_bufs(hw);

init_adminq_exit:
        return ret_code;
}
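
/*
 * Unwind note (illustrative): the error labels above undo work in reverse
 * order of setup, the usual goto-ladder shape:
 *
 *      alloc ring   -> on failure: return as-is
 *      alloc bufs   -> on failure: free the ring
 *      config regs  -> on failure: free the buffers (which also releases
 *                      the descriptor ring and DMA-tracking arrays)
 */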

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        iavf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        iavf_init_spinlock(&hw->aq.asq_spinlock);
        iavf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_asq;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
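
/*
 * Bring-up sketch (hypothetical attach path; the iavf_example_* name and
 * the literal ring depths and buffer sizes are illustrative, not part of
 * this file).  All four fields listed above must be set before the call:
 */
#if 0
static enum iavf_status iavf_example_adminq_bringup(struct iavf_hw *hw)
{
        enum iavf_status status;

        hw->aq.num_asq_entries = 32;    /* ring depths */
        hw->aq.num_arq_entries = 32;
        hw->aq.asq_buf_size = 4096;     /* indirect-command buffer sizes */
        hw->aq.arq_buf_size = 4096;

        status = iavf_init_adminq(hw);
        if (status != IAVF_SUCCESS)
                return status;

        /* ... use the queues; on detach, tear everything down in one call */
        return iavf_shutdown_adminq(hw);
}
#endif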

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &(hw->aq.asq);
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                                    IAVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
                iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
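
/*
 * Free-count sketch (illustrative, assuming the conventional definition of
 * the ring macro): IAVF_DESC_UNUSED computes the slots available between
 * next_to_use and next_to_clean, modulo the ring size, minus one reserved
 * slot.  For example, with count = 32, next_to_clean = 4, next_to_use = 10:
 *
 *      unused = 32 + 4 - 10 - 1 = 25
 */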

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                struct iavf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct iavf_asq_cmd_details *cmd_details)
{
        enum iavf_status status = IAVF_SUCCESS;
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                iavf_memcpy(details,
                            cmd_details,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
                }
        } else {
                iavf_memset(details, 0,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                iavf_memcpy(dma_buff->va, buff, buff_size,
                            IAVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        iavf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                            IAVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        iavf_memcpy(buff, dma_buff->va, buff_size,
                                    IAVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = IAVF_SUCCESS;
                else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
                        status = IAVF_ERR_NOT_READY;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                iavf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}
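
/*
 * Timing note (illustrative): the completion wait above polls the head
 * register every 50 microseconds, so with asq_cmd_timeout set to 250000
 * (the value conventionally given to IAVF_ASQ_CMD_TIMEOUT, in usecs) the
 * driver gives up after roughly 250 ms, i.e. about 5000 polls.
 */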

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}
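
/*
 * Send sketch (hypothetical caller; the iavf_example_* name is
 * illustrative).  A direct command, one carrying no indirect buffer,
 * pairs the two routines above:
 */
#if 0
static enum iavf_status iavf_example_send_direct(struct iavf_hw *hw,
                                                 u16 opcode)
{
        struct iavf_aq_desc desc;

        iavf_fill_default_direct_cmd_desc(&desc, opcode);
        /* NULL buffer and zero size mark this as a direct command */
        return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}
#endif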

/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                             struct iavf_arq_event_info *e,
                                             u16 *pending)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                iavf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, IAVF_DMA_TO_NONDMA);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        iavf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
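
/*
 * Drain sketch (hypothetical event loop; the iavf_example_* name and the
 * stack buffer are illustrative only, a real driver would allocate the
 * message buffer once at init time):
 */
#if 0
static void iavf_example_drain_arq(struct iavf_hw *hw)
{
        struct iavf_arq_event_info event;
        u8 buf[512];
        u16 pending = 0;

        event.buf_len = sizeof(buf);
        event.msg_buf = buf;

        do {
                /* returns IAVF_ERR_ADMIN_QUEUE_NO_WORK once the ring is idle */
                if (iavf_clean_arq_element(hw, &event, &pending))
                        break;
                /* event.desc and event.msg_buf now describe one event */
        } while (pending);
}
#endif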