FreeBSD/Linux Kernel Cross Reference
sys/dev/qlxge/qls_isr.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

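/*
 * qls_tx_comp
 * Handle one transmit completion: sync and unload the DMA map of the
 * completed buffer, free its mbuf, count the output packet, and advance the
 * ring's done index.  An out-of-range completion index triggers recovery.
 */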
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
        qla_tx_buf_t *txb;
        uint32_t tx_idx = tx_comp->tid_lo;

        if (tx_idx >= NUM_TX_DESCRIPTORS) {
                ha->qla_initiate_recovery = 1;
                return;
        }

        txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

        if (txb->m_head) {
                if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
                bus_dmamap_sync(ha->tx_tag, txb->map,
                        BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ha->tx_tag, txb->map);
                m_freem(txb->m_head);

                txb->m_head = NULL;
        }

        ha->tx_ring[txr_idx].txr_done++;

        if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
                ha->tx_ring[txr_idx].txr_done = 0;
}

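/*
 * qls_replenish_rx
 * Refill the small-buffer queue of the given receive ring: allocate fresh
 * mbufs where needed, post their bus addresses to the queue entries, and
 * write the producer index to the chip in batches of 16 entries.
 */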
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
        qla_rx_buf_t                    *rxb;
        qla_rx_ring_t                   *rxr;
        int                             count;
        volatile q81_bq_addr_e_t        *sbq_e;

        rxr = &ha->rx_ring[r_idx];

        count = rxr->rx_free;
        sbq_e = rxr->sbq_vaddr;

        while (count--) {
                rxb = &rxr->rx_buf[rxr->sbq_next];

                if (rxb->m_head == NULL) {
                        if (qls_get_mbuf(ha, rxb, NULL) != 0) {
                                device_printf(ha->pci_dev,
                                        "%s: qls_get_mbuf [0,%d,%d] failed\n",
                                        __func__, rxr->sbq_next, r_idx);
                                rxb->m_head = NULL;
                                break;
                        }
                }

                if (rxb->m_head != NULL) {
                        sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
                        sbq_e[rxr->sbq_next].addr_hi =
                                (uint32_t)(rxb->paddr >> 32);

                        rxr->sbq_next++;
                        if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
                                rxr->sbq_next = 0;

                        rxr->sbq_free++;
                        rxr->rx_free--;
                }

                if (rxr->sbq_free == 16) {
                        rxr->sbq_in += 16;
                        rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
                        rxr->sbq_free = 0;

                        Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
                }
        }
}

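/*
 * qls_rx_comp
 * Handle one receive completion: verify the entry against the posted buffer,
 * strip any VLAN encapsulation, record RSS and checksum information, and
 * hand the mbuf to LRO or directly to the network stack.  Returns 0 on
 * success, -1 if the completion entry is inconsistent.
 */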
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
        qla_rx_buf_t    *rxb;
        qla_rx_ring_t   *rxr;
        device_t        dev = ha->pci_dev;
        struct mbuf     *mp = NULL;
        struct ifnet    *ifp = ha->ifp;
        struct lro_ctrl *lro;
        struct ether_vlan_header *eh;

        rxr = &ha->rx_ring[rxr_idx];

        lro = &rxr->lro;

        rxb = &rxr->rx_buf[rxr->rx_next];

        if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
                device_printf(dev, "%s: DS bit not set \n", __func__);
                return -1;
        }
        if (rxb->paddr != cq_e->b_paddr) {
                device_printf(dev,
                        "%s: (rxb->paddr != cq_e->b_paddr)[%p, %p] \n",
                        __func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

                Q81_SET_CQ_INVALID(cq_idx);

                ha->qla_initiate_recovery = 1;

                return(-1);
        }

        rxr->rx_int++;

        if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
                mp = rxb->m_head;
                rxb->m_head = NULL;

                if (mp == NULL) {
                        device_printf(dev, "%s: mp == NULL\n", __func__);
                } else {
                        mp->m_flags |= M_PKTHDR;
                        mp->m_pkthdr.len = cq_e->length;
                        mp->m_pkthdr.rcvif = ifp;
                        mp->m_len = cq_e->length;

                        eh = mtod(mp, struct ether_vlan_header *);

                        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                                uint32_t *data = (uint32_t *)eh;

                                mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
                                mp->m_flags |= M_VLANTAG;

                                *(data + 3) = *(data + 2);
                                *(data + 2) = *(data + 1);
                                *(data + 1) = *data;

                                m_adj(mp, ETHER_VLAN_ENCAP_LEN);
                        }

                        if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
                                rxr->rss_int++;
                                mp->m_pkthdr.flowid = cq_e->rss;
                                M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
                        }
                        if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
                                Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
                                mp->m_pkthdr.csum_flags = 0;
                        } else {
                                mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
                                        CSUM_IP_VALID | CSUM_DATA_VALID |
                                        CSUM_PSEUDO_HDR;
                                mp->m_pkthdr.csum_data = 0xFFFF;
                        }
                        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

                        if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
                                /* LRO packet has been successfully queued */
                        } else {
                                (*ifp->if_input)(ifp, mp);
                        }
                }
        } else {
                device_printf(dev, "%s: err [0x%08x]\n", __func__, cq_e->flags1);
        }

        rxr->rx_free++;
        rxr->rx_next++;

        if (rxr->rx_next == NUM_RX_DESCRIPTORS)
                rxr->rx_next = 0;

        if ((rxr->rx_free + rxr->sbq_free) >= 16)
                qls_replenish_rx(ha, rxr_idx);

        return 0;
}

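/*
 * qls_cq_isr
 * Drain the completion queue for one ring: dispatch transmit and receive
 * completions, kick the transmit taskqueue when transmits have completed,
 * flush LRO, and update the hardware consumer index.
 */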
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
        q81_cq_e_t *cq_e, *cq_b;
        uint32_t i, cq_comp_idx;
        int ret = 0, tx_comp_done = 0;
        struct lro_ctrl *lro;

        cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
        lro = &ha->rx_ring[cq_idx].lro;

        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

        i = ha->rx_ring[cq_idx].cq_next;

        while (i != cq_comp_idx) {
                cq_e = &cq_b[i];

                switch (cq_e->opcode) {
                case Q81_IOCB_TX_MAC:
                case Q81_IOCB_TX_TSO:
                        qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
                        tx_comp_done++;
                        break;

                case Q81_IOCB_RX:
                        ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);

                        break;

                case Q81_IOCB_MPI:
                case Q81_IOCB_SYS:
                default:
                        device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal \n",
                                __func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
                                cq_e->opcode);
                        qls_dump_buf32(ha, __func__, cq_e,
                                (sizeof (q81_cq_e_t) >> 2));
                        break;
                }

                i++;
                if (i == NUM_CQ_ENTRIES)
                        i = 0;

                if (ret) {
                        break;
                }

                if (i == cq_comp_idx) {
                        cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
                }

                if (tx_comp_done) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                        tx_comp_done = 0;
                }
        }

        tcp_lro_flush_all(lro);

        ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

        if (!ret) {
                Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
        }
        if (tx_comp_done)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

        return;
}

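/*
 * qls_mbx_isr
 * Service a mailbox interrupt: read the mailbox registers into ha->mbox for
 * command completions (0x4xxx) or into ha->aen for asynchronous events
 * (0x8xxx), update link state from AENs, and clear the RISC-to-host
 * interrupt.
 */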
static void
qls_mbx_isr(qla_host_t *ha)
{
        uint32_t data;
        int i;
        device_t dev = ha->pci_dev;

        if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
                if ((data & 0xF000) == 0x4000) {
                        ha->mbox[0] = data;
                        for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->mbox[i] = data;
                        }
                        ha->mbx_done = 1;
                } else if ((data & 0xF000) == 0x8000) {
                        /* we have an AEN */

                        ha->aen[0] = data;
                        for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
                                if (qls_mbx_rd_reg(ha, i, &data))
                                        break;
                                ha->aen[i] = data;
                        }
                        device_printf(dev,"%s: AEN "
                                "[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
                                " 0x%08x 0x%08x 0x%08x 0x%08x]\n",
                                __func__,
                                ha->aen[0], ha->aen[1], ha->aen[2],
                                ha->aen[3], ha->aen[4], ha->aen[5],
                                ha->aen[6], ha->aen[7], ha->aen[8]);

                        switch ((ha->aen[0] & 0xFFFF)) {
                        case 0x8011:
                                ha->link_up = 1;
                                break;

                        case 0x8012:
                                ha->link_up = 0;
                                break;

                        case 0x8130:
                                ha->link_hw_info = ha->aen[1];
                                break;

                        case 0x8131:
                                ha->link_hw_info = 0;
                                break;
                        }
                }
        }
        WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

        return;
}

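/*
 * qls_isr
 * Top-level interrupt handler for one interrupt vector: check for a fatal
 * adapter error, service mailbox interrupts on vector 0, run the completion
 * queue handler for this vector if its status bit is set, and re-enable the
 * interrupt.
 */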
void
qls_isr(void *arg)
{
        qla_ivec_t *ivec = arg;
        qla_host_t *ha;
        uint32_t status;
        uint32_t cq_idx;
        device_t dev;

        ha = ivec->ha;
        cq_idx = ivec->cq_idx;
        dev = ha->pci_dev;

        status = READ_REG32(ha, Q81_CTL_STATUS);

        if (status & Q81_CTL_STATUS_FE) {
                device_printf(dev, "%s fatal error\n", __func__);
                return;
        }

        if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
                qls_mbx_isr(ha);
        }

        status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

        if (status & (0x1 << cq_idx))
                qls_cq_isr(ha, cq_idx);

        Q81_ENABLE_INTR(ha, cq_idx);

        return;
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.