FreeBSD/Linux Kernel Cross Reference
sys/dev/qlxgbe/ql_hw.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2013-2016 Qlogic Corporation
    5  * All rights reserved.
    6  *
    7  *  Redistribution and use in source and binary forms, with or without
    8  *  modification, are permitted provided that the following conditions
    9  *  are met:
   10  *
   11  *  1. Redistributions of source code must retain the above copyright
   12  *     notice, this list of conditions and the following disclaimer.
   13  *  2. Redistributions in binary form must reproduce the above copyright
   14  *     notice, this list of conditions and the following disclaimer in the
   15  *     documentation and/or other materials provided with the distribution.
   16  *
   17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   27  *  POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * File: ql_hw.c
    32  * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
    33  * Content: Contains hardware-dependent functions
   34  */
   35 
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD$");
   38 
   39 #include "ql_os.h"
   40 #include "ql_hw.h"
   41 #include "ql_def.h"
   42 #include "ql_inline.h"
   43 #include "ql_ver.h"
   44 #include "ql_glbl.h"
   45 #include "ql_dbg.h"
   46 #include "ql_minidump.h"
   47 
   48 /*
   49  * Static Functions
   50  */
   51 
   52 static void qla_del_rcv_cntxt(qla_host_t *ha);
   53 static int qla_init_rcv_cntxt(qla_host_t *ha);
   54 static int qla_del_xmt_cntxt(qla_host_t *ha);
   55 static int qla_init_xmt_cntxt(qla_host_t *ha);
   56 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
   57         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
   58 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
   59         uint32_t num_intrs, uint32_t create);
   60 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
   61 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
   62         int tenable, int rcv);
   63 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
   64 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
   65 
   66 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
   67                 uint8_t *hdr);
   68 static int qla_hw_add_all_mcast(qla_host_t *ha);
   69 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
   70 
   71 static int qla_init_nic_func(qla_host_t *ha);
   72 static int qla_stop_nic_func(qla_host_t *ha);
   73 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
   74 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
   75 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
   76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
   77 static int qla_get_cam_search_mode(qla_host_t *ha);
   78 
   79 static void ql_minidump_free(qla_host_t *ha);
   80 
   81 #ifdef QL_DBG
   82 
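      /*
       * Debug-only fault-injection aid: writing 1 to each Q8_CRB_PEG_*
       * CRB register presumably halts the adapter's protocol-engine (PEG)
       * processors, making the firmware stop responding so the driver's
       * heartbeat and error-recovery paths can be exercised.
       */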
   83 static void
   84 qla_stop_pegs(qla_host_t *ha)
   85 {
   86         uint32_t val = 1;
   87 
   88         ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
   89         ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
   90         ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
   91         ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
   92         ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
   93         device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
   94 }
   95 
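      /*
       * Sysctl handler behind the debug-only "peg_stop" node registered
       * in ql_hw_add_sysctls() below.  A usage sketch, assuming the QL_DBG
       * build option and a device instance named ql0 (the instance name
       * is an assumption):
       *
       *      # sysctl dev.ql.0.peg_stop=1
       *
       * Any written value other than 1 is ignored.
       */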
   96 static int
   97 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
   98 {
   99         int err, ret = 0;
  100         qla_host_t *ha;
  101 
  102         err = sysctl_handle_int(oidp, &ret, 0, req);
  103 
  104         if (err || !req->newptr)
  105                 return (err);
  106 
  107         if (ret == 1) {
  108                 ha = (qla_host_t *)arg1;
  109                 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
  110                         qla_stop_pegs(ha);
  111                         QLA_UNLOCK(ha, __func__);
  112                 }
  113         }
  114 
  115         return (err);
  116 }
  117 #endif /* #ifdef QL_DBG */
  118 
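      /*
       * The port-configuration word validated below packs three fields
       * (see the "port_cfg" sysctl help text in ql_hw_add_sysctls()):
       *      bits 0-3  : DCBX enable          (0 or 1)
       *      bits 4-7  : pause type           (0 = none, 1 = std, 2 = ppm)
       *      bits 8-11 : std pause direction  (0 = xmt+rcv, 1 = xmt, 2 = rcv)
       * For example, 0x121 would request DCBX on, PPM pause, and xmt-only
       * standard-pause direction (an illustrative value, not a default).
       */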
  119 static int
  120 qla_validate_set_port_cfg_bit(uint32_t bits)
  121 {
  122         if ((bits & 0xF) > 1)
  123                 return (-1);
  124 
  125         if (((bits >> 4) & 0xF) > 2)
  126                 return (-1);
  127 
  128         if (((bits >> 8) & 0xF) > 2)
  129                 return (-1);
  130 
  131         return (0);
  132 }
  133 
  134 static int
  135 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
  136 {
  137         int err, ret = 0;
  138         qla_host_t *ha;
  139         uint32_t cfg_bits;
  140 
  141         err = sysctl_handle_int(oidp, &ret, 0, req);
  142 
  143         if (err || !req->newptr)
  144                 return (err);
  145 
  146         ha = (qla_host_t *)arg1;
  147 
  148         if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
  149                 err = qla_get_port_config(ha, &cfg_bits);
  150 
  151                 if (err)
  152                         goto qla_sysctl_set_port_cfg_exit;
  153 
  154                 if (ret & 0x1) {
  155                         cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
  156                 } else {
  157                         cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
  158                 }
  159 
  160                 ret = ret >> 4;
  161                 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
  162 
  163                 if ((ret & 0xF) == 0) {
  164                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
  165                 } else if ((ret & 0xF) == 1) {
  166                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
  167                 } else {
  168                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
  169                 }
  170 
  171                 ret = ret >> 4;
  172                 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
  173 
  174                 if (ret == 0) {
  175                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
  176                 } else if (ret == 1) {
  177                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
  178                 } else {
  179                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
  180                 }
  181 
  182                 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
  183                         err = qla_set_port_config(ha, cfg_bits);
  184                         QLA_UNLOCK(ha, __func__);
  185                 } else {
  186                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
  187                 }
  188         } else {
  189                 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
  190                         err = qla_get_port_config(ha, &cfg_bits);
  191                         QLA_UNLOCK(ha, __func__);
  192                 } else {
  193                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
  194                 }
  195         }
  196 
  197 qla_sysctl_set_port_cfg_exit:
  198         return (err);
  199 }
  200 
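      /*
       * A usage sketch for the handler above ("ql0" is an assumed device
       * instance; sysctl(8) takes the value in decimal):
       *
       *      # sysctl dev.ql.0.port_cfg=289      (289 == 0x121: set config)
       *      # sysctl dev.ql.0.port_cfg=4095     (invalid bits: the handler
       *                                           falls back to a "get")
       *
       * The port is reprogrammed only when the written value passes
       * qla_validate_set_port_cfg_bit(); otherwise the handler just
       * fetches the current configuration via qla_get_port_config().
       */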
  201 static int
  202 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
  203 {
  204         int err, ret = 0;
  205         qla_host_t *ha;
  206 
  207         err = sysctl_handle_int(oidp, &ret, 0, req);
  208 
  209         if (err || !req->newptr)
  210                 return (err);
  211 
  212         ha = (qla_host_t *)arg1;
  213 
  214         if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
  215                 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
  216                 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
  217                         err = qla_set_cam_search_mode(ha, (uint32_t)ret);
  218                         QLA_UNLOCK(ha, __func__);
  219                 } else {
  220                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
  221                 }
  222 
  223         } else {
  224                 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
  225         }
  226 
  227         return (err);
  228 }
  229 
  230 static int
  231 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
  232 {
  233         int err, ret = 0;
  234         qla_host_t *ha;
  235 
  236         err = sysctl_handle_int(oidp, &ret, 0, req);
  237 
  238         if (err || !req->newptr)
  239                 return (err);
  240 
  241         ha = (qla_host_t *)arg1;
  242         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
  243                 err = qla_get_cam_search_mode(ha);
  244                 QLA_UNLOCK(ha, __func__);
  245         } else {
  246                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
  247         }
  248 
  249         return (err);
  250 }
  251 
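      /*
       * Usage sketch for the two CAM search-mode handlers above ("ql0" is
       * an assumed device instance):
       *
       *      # sysctl dev.ql.0.set_cam_search_mode=1     (internal)
       *      # sysctl dev.ql.0.set_cam_search_mode=2     (auto)
       *      # sysctl dev.ql.0.get_cam_search_mode=1     (any write
       *                                                   triggers a query)
       *
       * The values 1 and 2 for internal/auto match the sysctl help text
       * in ql_hw_add_sysctls() below; Q8_HW_CONFIG_CAM_SEARCH_MODE_* are
       * assumed to carry those values.
       */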
  252 static void
  253 qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
  254 {
  255         struct sysctl_ctx_list  *ctx;
  256         struct sysctl_oid_list  *children;
  257         struct sysctl_oid       *ctx_oid;
  258 
  259         ctx = device_get_sysctl_ctx(ha->pci_dev);
  260         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  261 
  262         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
  263             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac");
  264         children = SYSCTL_CHILDREN(ctx_oid);
  265 
  266         SYSCTL_ADD_QUAD(ctx, children,
  267                 OID_AUTO, "xmt_frames",
  268                 CTLFLAG_RD, &ha->hw.mac.xmt_frames,
  269                 "xmt_frames");
  270 
  271         SYSCTL_ADD_QUAD(ctx, children,
  272                 OID_AUTO, "xmt_bytes",
  273                 CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
   274                 "xmt_bytes");
  275 
  276         SYSCTL_ADD_QUAD(ctx, children,
  277                 OID_AUTO, "xmt_mcast_pkts",
  278                 CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
  279                 "xmt_mcast_pkts");
  280 
  281         SYSCTL_ADD_QUAD(ctx, children,
  282                 OID_AUTO, "xmt_bcast_pkts",
  283                 CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
  284                 "xmt_bcast_pkts");
  285 
  286         SYSCTL_ADD_QUAD(ctx, children,
  287                 OID_AUTO, "xmt_pause_frames",
  288                 CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
  289                 "xmt_pause_frames");
  290 
  291         SYSCTL_ADD_QUAD(ctx, children,
  292                 OID_AUTO, "xmt_cntrl_pkts",
  293                 CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
  294                 "xmt_cntrl_pkts");
  295 
  296         SYSCTL_ADD_QUAD(ctx, children,
  297                 OID_AUTO, "xmt_pkt_lt_64bytes",
  298                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
  299                 "xmt_pkt_lt_64bytes");
  300 
  301         SYSCTL_ADD_QUAD(ctx, children,
  302                 OID_AUTO, "xmt_pkt_lt_127bytes",
  303                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
  304                 "xmt_pkt_lt_127bytes");
  305 
  306         SYSCTL_ADD_QUAD(ctx, children,
  307                 OID_AUTO, "xmt_pkt_lt_255bytes",
  308                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
  309                 "xmt_pkt_lt_255bytes");
  310 
  311         SYSCTL_ADD_QUAD(ctx, children,
  312                 OID_AUTO, "xmt_pkt_lt_511bytes",
  313                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
  314                 "xmt_pkt_lt_511bytes");
  315 
  316         SYSCTL_ADD_QUAD(ctx, children,
  317                 OID_AUTO, "xmt_pkt_lt_1023bytes",
  318                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
  319                 "xmt_pkt_lt_1023bytes");
  320 
  321         SYSCTL_ADD_QUAD(ctx, children,
  322                 OID_AUTO, "xmt_pkt_lt_1518bytes",
  323                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
  324                 "xmt_pkt_lt_1518bytes");
  325 
  326         SYSCTL_ADD_QUAD(ctx, children,
  327                 OID_AUTO, "xmt_pkt_gt_1518bytes",
  328                 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
  329                 "xmt_pkt_gt_1518bytes");
  330 
  331         SYSCTL_ADD_QUAD(ctx, children,
  332                 OID_AUTO, "rcv_frames",
  333                 CTLFLAG_RD, &ha->hw.mac.rcv_frames,
  334                 "rcv_frames");
  335 
  336         SYSCTL_ADD_QUAD(ctx, children,
  337                 OID_AUTO, "rcv_bytes",
  338                 CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
  339                 "rcv_bytes");
  340 
  341         SYSCTL_ADD_QUAD(ctx, children,
  342                 OID_AUTO, "rcv_mcast_pkts",
  343                 CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
  344                 "rcv_mcast_pkts");
  345 
  346         SYSCTL_ADD_QUAD(ctx, children,
  347                 OID_AUTO, "rcv_bcast_pkts",
  348                 CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
  349                 "rcv_bcast_pkts");
  350 
  351         SYSCTL_ADD_QUAD(ctx, children,
  352                 OID_AUTO, "rcv_pause_frames",
  353                 CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
  354                 "rcv_pause_frames");
  355 
  356         SYSCTL_ADD_QUAD(ctx, children,
  357                 OID_AUTO, "rcv_cntrl_pkts",
  358                 CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
  359                 "rcv_cntrl_pkts");
  360 
  361         SYSCTL_ADD_QUAD(ctx, children,
  362                 OID_AUTO, "rcv_pkt_lt_64bytes",
  363                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
  364                 "rcv_pkt_lt_64bytes");
  365 
  366         SYSCTL_ADD_QUAD(ctx, children,
  367                 OID_AUTO, "rcv_pkt_lt_127bytes",
  368                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
  369                 "rcv_pkt_lt_127bytes");
  370 
  371         SYSCTL_ADD_QUAD(ctx, children,
  372                 OID_AUTO, "rcv_pkt_lt_255bytes",
  373                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
  374                 "rcv_pkt_lt_255bytes");
  375 
  376         SYSCTL_ADD_QUAD(ctx, children,
  377                 OID_AUTO, "rcv_pkt_lt_511bytes",
  378                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
  379                 "rcv_pkt_lt_511bytes");
  380 
  381         SYSCTL_ADD_QUAD(ctx, children,
  382                 OID_AUTO, "rcv_pkt_lt_1023bytes",
  383                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
  384                 "rcv_pkt_lt_1023bytes");
  385 
  386         SYSCTL_ADD_QUAD(ctx, children,
  387                 OID_AUTO, "rcv_pkt_lt_1518bytes",
  388                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
  389                 "rcv_pkt_lt_1518bytes");
  390 
  391         SYSCTL_ADD_QUAD(ctx, children,
  392                 OID_AUTO, "rcv_pkt_gt_1518bytes",
  393                 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
  394                 "rcv_pkt_gt_1518bytes");
  395 
  396         SYSCTL_ADD_QUAD(ctx, children,
  397                 OID_AUTO, "rcv_len_error",
  398                 CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
  399                 "rcv_len_error");
  400 
  401         SYSCTL_ADD_QUAD(ctx, children,
  402                 OID_AUTO, "rcv_len_small",
  403                 CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
  404                 "rcv_len_small");
  405 
  406         SYSCTL_ADD_QUAD(ctx, children,
  407                 OID_AUTO, "rcv_len_large",
  408                 CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
  409                 "rcv_len_large");
  410 
  411         SYSCTL_ADD_QUAD(ctx, children,
  412                 OID_AUTO, "rcv_jabber",
  413                 CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
  414                 "rcv_jabber");
  415 
  416         SYSCTL_ADD_QUAD(ctx, children,
  417                 OID_AUTO, "rcv_dropped",
  418                 CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
  419                 "rcv_dropped");
  420 
  421         SYSCTL_ADD_QUAD(ctx, children,
  422                 OID_AUTO, "fcs_error",
  423                 CTLFLAG_RD, &ha->hw.mac.fcs_error,
  424                 "fcs_error");
  425 
  426         SYSCTL_ADD_QUAD(ctx, children,
  427                 OID_AUTO, "align_error",
  428                 CTLFLAG_RD, &ha->hw.mac.align_error,
  429                 "align_error");
  430 
  431         SYSCTL_ADD_QUAD(ctx, children,
  432                 OID_AUTO, "eswitched_frames",
  433                 CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
  434                 "eswitched_frames");
  435 
  436         SYSCTL_ADD_QUAD(ctx, children,
  437                 OID_AUTO, "eswitched_bytes",
  438                 CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
  439                 "eswitched_bytes");
  440 
  441         SYSCTL_ADD_QUAD(ctx, children,
  442                 OID_AUTO, "eswitched_mcast_frames",
  443                 CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
  444                 "eswitched_mcast_frames");
  445 
  446         SYSCTL_ADD_QUAD(ctx, children,
  447                 OID_AUTO, "eswitched_bcast_frames",
  448                 CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
  449                 "eswitched_bcast_frames");
  450 
  451         SYSCTL_ADD_QUAD(ctx, children,
  452                 OID_AUTO, "eswitched_ucast_frames",
  453                 CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
  454                 "eswitched_ucast_frames");
  455 
  456         SYSCTL_ADD_QUAD(ctx, children,
  457                 OID_AUTO, "eswitched_err_free_frames",
  458                 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
  459                 "eswitched_err_free_frames");
  460 
  461         SYSCTL_ADD_QUAD(ctx, children,
  462                 OID_AUTO, "eswitched_err_free_bytes",
  463                 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
  464                 "eswitched_err_free_bytes");
  465 
  466         return;
  467 }
  468 
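      /*
       * Each 64-bit hardware MAC counter above is exported read-only
       * under the dev.<driver>.<unit>.stats_hw_mac subtree.  Note that
       * SYSCTL_ADD_QUAD only publishes a pointer into ha->hw.mac; the
       * values shown are whatever the driver last copied out of the
       * adapter (presumably via a firmware statistics command elsewhere
       * in this file).
       */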
  469 static void
  470 qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
  471 {
  472         struct sysctl_ctx_list  *ctx;
  473         struct sysctl_oid_list  *children;
  474         struct sysctl_oid       *ctx_oid;
  475 
  476         ctx = device_get_sysctl_ctx(ha->pci_dev);
  477         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  478 
  479         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
  480             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv");
  481         children = SYSCTL_CHILDREN(ctx_oid);
  482 
  483         SYSCTL_ADD_QUAD(ctx, children,
  484                 OID_AUTO, "total_bytes",
  485                 CTLFLAG_RD, &ha->hw.rcv.total_bytes,
  486                 "total_bytes");
  487 
  488         SYSCTL_ADD_QUAD(ctx, children,
  489                 OID_AUTO, "total_pkts",
  490                 CTLFLAG_RD, &ha->hw.rcv.total_pkts,
  491                 "total_pkts");
  492 
  493         SYSCTL_ADD_QUAD(ctx, children,
  494                 OID_AUTO, "lro_pkt_count",
  495                 CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
  496                 "lro_pkt_count");
  497 
  498         SYSCTL_ADD_QUAD(ctx, children,
  499                 OID_AUTO, "sw_pkt_count",
  500                 CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
  501                 "sw_pkt_count");
  502 
  503         SYSCTL_ADD_QUAD(ctx, children,
  504                 OID_AUTO, "ip_chksum_err",
  505                 CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
  506                 "ip_chksum_err");
  507 
  508         SYSCTL_ADD_QUAD(ctx, children,
  509                 OID_AUTO, "pkts_wo_acntxts",
  510                 CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
  511                 "pkts_wo_acntxts");
  512 
  513         SYSCTL_ADD_QUAD(ctx, children,
  514                 OID_AUTO, "pkts_dropped_no_sds_card",
  515                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
  516                 "pkts_dropped_no_sds_card");
  517 
  518         SYSCTL_ADD_QUAD(ctx, children,
  519                 OID_AUTO, "pkts_dropped_no_sds_host",
  520                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
  521                 "pkts_dropped_no_sds_host");
  522 
  523         SYSCTL_ADD_QUAD(ctx, children,
  524                 OID_AUTO, "oversized_pkts",
  525                 CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
  526                 "oversized_pkts");
  527 
  528         SYSCTL_ADD_QUAD(ctx, children,
  529                 OID_AUTO, "pkts_dropped_no_rds",
  530                 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
  531                 "pkts_dropped_no_rds");
  532 
  533         SYSCTL_ADD_QUAD(ctx, children,
  534                 OID_AUTO, "unxpctd_mcast_pkts",
  535                 CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
  536                 "unxpctd_mcast_pkts");
  537 
  538         SYSCTL_ADD_QUAD(ctx, children,
  539                 OID_AUTO, "re1_fbq_error",
  540                 CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
  541                 "re1_fbq_error");
  542 
  543         SYSCTL_ADD_QUAD(ctx, children,
  544                 OID_AUTO, "invalid_mac_addr",
  545                 CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
  546                 "invalid_mac_addr");
  547 
  548         SYSCTL_ADD_QUAD(ctx, children,
  549                 OID_AUTO, "rds_prime_trys",
  550                 CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
  551                 "rds_prime_trys");
  552 
  553         SYSCTL_ADD_QUAD(ctx, children,
  554                 OID_AUTO, "rds_prime_success",
  555                 CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
  556                 "rds_prime_success");
  557 
  558         SYSCTL_ADD_QUAD(ctx, children,
  559                 OID_AUTO, "lro_flows_added",
  560                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
  561                 "lro_flows_added");
  562 
  563         SYSCTL_ADD_QUAD(ctx, children,
  564                 OID_AUTO, "lro_flows_deleted",
  565                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
  566                 "lro_flows_deleted");
  567 
  568         SYSCTL_ADD_QUAD(ctx, children,
  569                 OID_AUTO, "lro_flows_active",
  570                 CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
  571                 "lro_flows_active");
  572 
  573         SYSCTL_ADD_QUAD(ctx, children,
  574                 OID_AUTO, "pkts_droped_unknown",
  575                 CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
  576                 "pkts_droped_unknown");
  577 
  578         SYSCTL_ADD_QUAD(ctx, children,
  579                 OID_AUTO, "pkts_cnt_oversized",
  580                 CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
  581                 "pkts_cnt_oversized");
  582 
  583         return;
  584 }
  585 
  586 static void
  587 qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
  588 {
  589         struct sysctl_ctx_list  *ctx;
  590         struct sysctl_oid_list  *children;
  591         struct sysctl_oid_list  *node_children;
  592         struct sysctl_oid       *ctx_oid;
  593         int                     i;
  594         uint8_t                 name_str[16];
  595 
  596         ctx = device_get_sysctl_ctx(ha->pci_dev);
  597         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  598 
  599         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
  600             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt");
  601         children = SYSCTL_CHILDREN(ctx_oid);
  602 
  603         for (i = 0; i < ha->hw.num_tx_rings; i++) {
   604                 bzero(name_str, sizeof(name_str));
  605                 snprintf(name_str, sizeof(name_str), "%d", i);
  606 
  607                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
  608                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
  609                 node_children = SYSCTL_CHILDREN(ctx_oid);
  610 
  611                 /* Tx Related */
  612 
  613                 SYSCTL_ADD_QUAD(ctx, node_children,
  614                         OID_AUTO, "total_bytes",
  615                         CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
  616                         "total_bytes");
  617 
  618                 SYSCTL_ADD_QUAD(ctx, node_children,
  619                         OID_AUTO, "total_pkts",
  620                         CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
  621                         "total_pkts");
  622 
  623                 SYSCTL_ADD_QUAD(ctx, node_children,
  624                         OID_AUTO, "errors",
  625                         CTLFLAG_RD, &ha->hw.xmt[i].errors,
  626                         "errors");
  627 
  628                 SYSCTL_ADD_QUAD(ctx, node_children,
  629                         OID_AUTO, "pkts_dropped",
  630                         CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
  631                         "pkts_dropped");
  632 
  633                 SYSCTL_ADD_QUAD(ctx, node_children,
  634                         OID_AUTO, "switch_pkts",
  635                         CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
  636                         "switch_pkts");
  637 
  638                 SYSCTL_ADD_QUAD(ctx, node_children,
  639                         OID_AUTO, "num_buffers",
  640                         CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
  641                         "num_buffers");
  642         }
  643 
  644         return;
  645 }
  646 
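      /*
       * Transmit statistics get one numbered child node per Tx ring, so a
       * 4-ring configuration would surface paths such as
       * dev.<driver>.<unit>.stats_hw_xmt.0.total_bytes through
       * dev.<driver>.<unit>.stats_hw_xmt.3.total_bytes (example paths,
       * assuming the standard device sysctl tree layout).
       */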
  647 static void
  648 qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
  649 {
  650         struct sysctl_ctx_list  *ctx;
  651         struct sysctl_oid_list  *node_children;
  652 
  653         ctx = device_get_sysctl_ctx(ha->pci_dev);
  654         node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  655 
  656         SYSCTL_ADD_QUAD(ctx, node_children,
  657                 OID_AUTO, "mbx_completion_time_lt_200ms",
  658                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
  659                 "mbx_completion_time_lt_200ms");
  660 
  661         SYSCTL_ADD_QUAD(ctx, node_children,
  662                 OID_AUTO, "mbx_completion_time_200ms_400ms",
  663                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
  664                 "mbx_completion_time_200ms_400ms");
  665 
  666         SYSCTL_ADD_QUAD(ctx, node_children,
  667                 OID_AUTO, "mbx_completion_time_400ms_600ms",
  668                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
  669                 "mbx_completion_time_400ms_600ms");
  670 
  671         SYSCTL_ADD_QUAD(ctx, node_children,
  672                 OID_AUTO, "mbx_completion_time_600ms_800ms",
  673                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
  674                 "mbx_completion_time_600ms_800ms");
  675 
  676         SYSCTL_ADD_QUAD(ctx, node_children,
  677                 OID_AUTO, "mbx_completion_time_800ms_1000ms",
  678                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
  679                 "mbx_completion_time_800ms_1000ms");
  680 
  681         SYSCTL_ADD_QUAD(ctx, node_children,
  682                 OID_AUTO, "mbx_completion_time_1000ms_1200ms",
  683                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
  684                 "mbx_completion_time_1000ms_1200ms");
  685 
  686         SYSCTL_ADD_QUAD(ctx, node_children,
  687                 OID_AUTO, "mbx_completion_time_1200ms_1400ms",
  688                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
  689                 "mbx_completion_time_1200ms_1400ms");
  690 
  691         SYSCTL_ADD_QUAD(ctx, node_children,
  692                 OID_AUTO, "mbx_completion_time_1400ms_1600ms",
  693                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
  694                 "mbx_completion_time_1400ms_1600ms");
  695 
  696         SYSCTL_ADD_QUAD(ctx, node_children,
  697                 OID_AUTO, "mbx_completion_time_1600ms_1800ms",
  698                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
  699                 "mbx_completion_time_1600ms_1800ms");
  700 
  701         SYSCTL_ADD_QUAD(ctx, node_children,
  702                 OID_AUTO, "mbx_completion_time_1800ms_2000ms",
  703                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
  704                 "mbx_completion_time_1800ms_2000ms");
  705 
  706         SYSCTL_ADD_QUAD(ctx, node_children,
  707                 OID_AUTO, "mbx_completion_time_2000ms_2200ms",
  708                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
  709                 "mbx_completion_time_2000ms_2200ms");
  710 
  711         SYSCTL_ADD_QUAD(ctx, node_children,
  712                 OID_AUTO, "mbx_completion_time_2200ms_2400ms",
  713                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
  714                 "mbx_completion_time_2200ms_2400ms");
  715 
  716         SYSCTL_ADD_QUAD(ctx, node_children,
  717                 OID_AUTO, "mbx_completion_time_2400ms_2600ms",
  718                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
  719                 "mbx_completion_time_2400ms_2600ms");
  720 
  721         SYSCTL_ADD_QUAD(ctx, node_children,
  722                 OID_AUTO, "mbx_completion_time_2600ms_2800ms",
  723                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
  724                 "mbx_completion_time_2600ms_2800ms");
  725 
  726         SYSCTL_ADD_QUAD(ctx, node_children,
  727                 OID_AUTO, "mbx_completion_time_2800ms_3000ms",
  728                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
  729                 "mbx_completion_time_2800ms_3000ms");
  730 
  731         SYSCTL_ADD_QUAD(ctx, node_children,
  732                 OID_AUTO, "mbx_completion_time_3000ms_4000ms",
  733                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
  734                 "mbx_completion_time_3000ms_4000ms");
  735 
  736         SYSCTL_ADD_QUAD(ctx, node_children,
  737                 OID_AUTO, "mbx_completion_time_4000ms_5000ms",
  738                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
  739                 "mbx_completion_time_4000ms_5000ms");
  740 
  741         SYSCTL_ADD_QUAD(ctx, node_children,
  742                 OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
  743                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
  744                 "mbx_completion_host_mbx_cntrl_timeout");
  745 
  746         SYSCTL_ADD_QUAD(ctx, node_children,
  747                 OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
  748                 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
  749                 "mbx_completion_fw_mbx_cntrl_timeout");
  750         return;
  751 }
  752 
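      /*
       * ha->hw.mbx_comp_msecs[] is a 19-entry latency histogram for
       * mailbox command completions: [0] is < 200ms, [1] through [14] are
       * successive 200ms buckets up to 3000ms, [15] covers 3000-4000ms,
       * [16] covers 4000-5000ms, and [17]/[18] count host/firmware
       * mailbox control-register timeouts.  One plausible sketch of the
       * binning (the real update code lives in qla_mbx_cmd() further down
       * in this file and may differ):
       *
       *      uint64_t ms = (end_usecs - start_usecs) / 1000;
       *      int idx;
       *
       *      if (ms < 3000)
       *              idx = ms / 200;
       *      else if (ms < 4000)
       *              idx = 15;
       *      else
       *              idx = 16;
       *      ha->hw.mbx_comp_msecs[idx]++;
       */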
  753 static void
  754 qlnx_add_hw_stats_sysctls(qla_host_t *ha)
  755 {
  756         qlnx_add_hw_mac_stats_sysctls(ha);
  757         qlnx_add_hw_rcv_stats_sysctls(ha);
  758         qlnx_add_hw_xmt_stats_sysctls(ha);
  759         qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);
  760 
  761         return;
  762 }
  763 
  764 static void
  765 qlnx_add_drvr_sds_stats(qla_host_t *ha)
  766 {
  767         struct sysctl_ctx_list  *ctx;
  768         struct sysctl_oid_list  *children;
  769         struct sysctl_oid_list  *node_children;
  770         struct sysctl_oid       *ctx_oid;
  771         int                     i;
  772         uint8_t                 name_str[16];
  773 
  774         ctx = device_get_sysctl_ctx(ha->pci_dev);
  775         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  776 
  777         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
  778             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds");
  779         children = SYSCTL_CHILDREN(ctx_oid);
  780 
  781         for (i = 0; i < ha->hw.num_sds_rings; i++) {
   782                 bzero(name_str, sizeof(name_str));
  783                 snprintf(name_str, sizeof(name_str), "%d", i);
  784 
  785                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
  786                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
  787                 node_children = SYSCTL_CHILDREN(ctx_oid);
  788 
  789                 SYSCTL_ADD_QUAD(ctx, node_children,
  790                         OID_AUTO, "intr_count",
  791                         CTLFLAG_RD, &ha->hw.sds[i].intr_count,
  792                         "intr_count");
  793 
  794                 SYSCTL_ADD_UINT(ctx, node_children,
  795                         OID_AUTO, "rx_free",
  796                         CTLFLAG_RD, &ha->hw.sds[i].rx_free,
  797                         ha->hw.sds[i].rx_free, "rx_free");
  798         }
  799 
  800         return;
  801 }

   802 static void
  803 qlnx_add_drvr_rds_stats(qla_host_t *ha)
  804 {
  805         struct sysctl_ctx_list  *ctx;
  806         struct sysctl_oid_list  *children;
  807         struct sysctl_oid_list  *node_children;
  808         struct sysctl_oid       *ctx_oid;
  809         int                     i;
  810         uint8_t                 name_str[16];
  811 
  812         ctx = device_get_sysctl_ctx(ha->pci_dev);
  813         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  814 
  815         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
  816             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_rds");
  817         children = SYSCTL_CHILDREN(ctx_oid);
  818 
  819         for (i = 0; i < ha->hw.num_rds_rings; i++) {
   820                 bzero(name_str, sizeof(name_str));
  821                 snprintf(name_str, sizeof(name_str), "%d", i);
  822 
  823                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
  824                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
  825                 node_children = SYSCTL_CHILDREN(ctx_oid);
  826 
  827                 SYSCTL_ADD_QUAD(ctx, node_children,
  828                         OID_AUTO, "count",
  829                         CTLFLAG_RD, &ha->hw.rds[i].count,
  830                         "count");
  831 
  832                 SYSCTL_ADD_QUAD(ctx, node_children,
  833                         OID_AUTO, "lro_pkt_count",
  834                         CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
  835                         "lro_pkt_count");
  836 
  837                 SYSCTL_ADD_QUAD(ctx, node_children,
  838                         OID_AUTO, "lro_bytes",
  839                         CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
  840                         "lro_bytes");
  841         }
  842 
  843         return;
  844 }
  845 
  846 static void
  847 qlnx_add_drvr_tx_stats(qla_host_t *ha)
  848 {
  849         struct sysctl_ctx_list  *ctx;
  850         struct sysctl_oid_list  *children;
  851         struct sysctl_oid_list  *node_children;
  852         struct sysctl_oid       *ctx_oid;
  853         int                     i;
  854         uint8_t                 name_str[16];
  855 
  856         ctx = device_get_sysctl_ctx(ha->pci_dev);
  857         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
  858 
  859         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
  860             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt");
  861         children = SYSCTL_CHILDREN(ctx_oid);
  862 
  863         for (i = 0; i < ha->hw.num_tx_rings; i++) {
   864                 bzero(name_str, sizeof(name_str));
  865                 snprintf(name_str, sizeof(name_str), "%d", i);
  866 
  867                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
  868                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
  869                 node_children = SYSCTL_CHILDREN(ctx_oid);
  870 
  871                 SYSCTL_ADD_QUAD(ctx, node_children,
  872                         OID_AUTO, "count",
  873                         CTLFLAG_RD, &ha->tx_ring[i].count,
  874                         "count");
  875 
  876 #ifdef QL_ENABLE_ISCSI_TLV
  877                 SYSCTL_ADD_QUAD(ctx, node_children,
  878                         OID_AUTO, "iscsi_pkt_count",
  879                         CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
  880                         "iscsi_pkt_count");
  881 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
  882         }
  883 
  884         return;
  885 }
  886 
  887 static void
  888 qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
  889 {
  890         qlnx_add_drvr_sds_stats(ha);
  891         qlnx_add_drvr_rds_stats(ha);
  892         qlnx_add_drvr_tx_stats(ha);
  893         return;
  894 }
  895 
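      /*
       * The stats_drvr_sds/rds/xmt subtrees above export counters that
       * the host driver maintains itself, in contrast to the stats_hw_*
       * subtrees, which appear to mirror counters kept by the adapter.
       */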
  896 /*
  897  * Name: ql_hw_add_sysctls
  898  * Function: Add P3Plus specific sysctls
  899  */
  900 void
  901 ql_hw_add_sysctls(qla_host_t *ha)
  902 {
  903         device_t        dev;
  904 
  905         dev = ha->pci_dev;
  906 
  907         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  908                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  909                 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
  910                 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
  911 
  912         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  913                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  914                 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
  915                 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
  916 
  917         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  918                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  919                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
  920                 ha->hw.num_tx_rings, "Number of Transmit Rings");
  921 
  922         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  923                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  924                 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
  925                 ha->txr_idx, "Tx Ring Used");
  926 
  927         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  928                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  929                 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
  930                 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
  931 
  932         ha->hw.sds_cidx_thres = 32;
  933         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  934                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  935                 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
  936                 ha->hw.sds_cidx_thres,
  937                 "Number of SDS entries to process before updating"
  938                 " SDS Ring Consumer Index");
  939 
  940         ha->hw.rds_pidx_thres = 32;
  941         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  942                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  943                 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
  944                 ha->hw.rds_pidx_thres,
  945                 "Number of Rcv Rings Entries to post before updating"
  946                 " RDS Ring Producer Index");
  947 
  948         ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
  949         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  950                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  951                 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
  952                 &ha->hw.rcv_intr_coalesce,
  953                 ha->hw.rcv_intr_coalesce,
   954                 "Rcv Intr Coalescing Parameters\n"
   955                 "\tbits 15:0 max packets\n"
   956                 "\tbits 31:16 max microseconds to wait\n"
   957                 "\trun\n"
   958                 "\tifconfig <if> down && ifconfig <if> up\n"
   959                 "\tfor the change to take effect\n");
  960 
  961         ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
  962         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
  963                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  964                 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
  965                 &ha->hw.xmt_intr_coalesce,
  966                 ha->hw.xmt_intr_coalesce,
   967                 "Xmt Intr Coalescing Parameters\n"
   968                 "\tbits 15:0 max packets\n"
   969                 "\tbits 31:16 max microseconds to wait\n"
   970                 "\trun\n"
   971                 "\tifconfig <if> down && ifconfig <if> up\n"
   972                 "\tfor the change to take effect\n");
  973 
  974         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  975             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  976             "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  977             (void *)ha, 0, qla_sysctl_port_cfg, "I",
   978             "Set Port Configuration if a valid value (below) is"
   979             " written; otherwise Get Port Configuration\n"
   980             "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
   981             "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
   982             "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
   983             " 1 = xmt only; 2 = rcv only\n");
  984 
  985         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  986             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  987             "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  988             (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I",
   989             "Set CAM Search Mode\n"
   990             "\t 1 = search mode internal\n"
   991             "\t 2 = search mode auto\n");
  992 
  993         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  994                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  995                 "get_cam_search_mode",
  996                 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
  997                 qla_sysctl_get_cam_search_mode, "I",
   998                 "Get CAM Search Mode\n"
   999                 "\t 1 = search mode internal\n"
  1000                 "\t 2 = search mode auto\n");
 1001 
 1002         ha->hw.enable_9kb = 1;
 1003 
 1004         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1005                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1006                 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
 1007                 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
 1008 
 1009         ha->hw.enable_hw_lro = 1;
 1010 
 1011         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1012                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1013                 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
  1014                 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
 1015                 "\t 1 : Hardware LRO if LRO is enabled\n"
 1016                 "\t 0 : Software LRO if LRO is enabled\n"
 1017                 "\t Any change requires ifconfig down/up to take effect\n"
 1018                 "\t Note that LRO may be turned off/on via ifconfig\n");
 1019 
 1020         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1021                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1022                 OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
 1023                 ha->hw.sp_log_index, "sp_log_index");
 1024 
 1025         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1026                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1027                 OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
 1028                 ha->hw.sp_log_stop, "sp_log_stop");
 1029 
 1030         ha->hw.sp_log_stop_events = 0;
 1031 
 1032         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1033                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1034                 OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
 1035                 &ha->hw.sp_log_stop_events,
 1036                 ha->hw.sp_log_stop_events, "Slow path event log is stopped"
  1037                 " when any event selected by this bitmask occurs\n"
 1038                 "\t 0x01 : Heart beat Failure\n"
 1039                 "\t 0x02 : Temperature Failure\n"
 1040                 "\t 0x04 : HW Initialization Failure\n"
 1041                 "\t 0x08 : Interface Initialization Failure\n"
 1042                 "\t 0x10 : Error Recovery Failure\n");
 1043 
 1044         ha->hw.mdump_active = 0;
 1045         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1046                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1047                 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
 1048                 ha->hw.mdump_active,
 1049                 "Minidump retrieval is Active");
 1050 
 1051         ha->hw.mdump_done = 0;
 1052         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1053                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1054                 OID_AUTO, "mdump_done", CTLFLAG_RW,
 1055                 &ha->hw.mdump_done, ha->hw.mdump_done,
 1056                 "Minidump has been done and available for retrieval");
 1057 
 1058         ha->hw.mdump_capture_mask = 0xF;
 1059         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1060                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1061                 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
 1062                 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
 1063                 "Minidump capture mask");
 1064 #ifdef QL_DBG
 1065 
 1066         ha->err_inject = 0;
 1067         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1068                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1069                 OID_AUTO, "err_inject",
 1070                 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
 1071                 "Error to be injected\n"
 1072                 "\t\t\t 0: No Errors\n"
 1073                 "\t\t\t 1: rcv: rxb struct invalid\n"
 1074                 "\t\t\t 2: rcv: mp == NULL\n"
 1075                 "\t\t\t 3: lro: rxb struct invalid\n"
 1076                 "\t\t\t 4: lro: mp == NULL\n"
 1077                 "\t\t\t 5: rcv: num handles invalid\n"
 1078                 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
 1079                 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
 1080                 "\t\t\t 8: mbx: mailbox command failure\n"
 1081                 "\t\t\t 9: heartbeat failure\n"
 1082                 "\t\t\t A: temperature failure\n"
 1083                 "\t\t\t 11: m_getcl or m_getjcl failure\n"
 1084                 "\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
 1085                 "\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
 1086                 "\t\t\t 15: peer port error recovery failure\n"
  1087                 "\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n");
 1088 
 1089         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1090             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 1091             "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
 1092             (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop");
 1093 
 1094 #endif /* #ifdef QL_DBG */
 1095 
 1096         ha->hw.user_pri_nic = 0;
 1097         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1098                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1099                 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
 1100                 ha->hw.user_pri_nic,
 1101                 "VLAN Tag User Priority for Normal Ethernet Packets");
 1102 
 1103         ha->hw.user_pri_iscsi = 4;
 1104         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
 1105                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1106                 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
 1107                 ha->hw.user_pri_iscsi,
 1108                 "VLAN Tag User Priority for iSCSI Packets");
 1109 
 1110         qlnx_add_hw_stats_sysctls(ha);
 1111         qlnx_add_drvr_stats_sysctls(ha);
 1112 
 1113         return;
 1114 }
 1115 
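      /*
       * A non-authoritative tuning example for the knobs registered above
       * ("ql0" is an assumed device instance; sysctl(8) takes decimal):
       *
       *      # sysctl dev.ql.0.rcv_intr_coalesce=131200
       *
       * 131200 == 0x20080, i.e. up to 128 packets (bits 15:0) or
       * 2 microseconds (bits 31:16) per receive interrupt, versus the
       * default of (3 << 16) | 256.  Per the help text, the interface
       * must be cycled with ifconfig down/up for the change to apply.
       */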
 1116 void
 1117 ql_hw_link_status(qla_host_t *ha)
 1118 {
 1119         device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
 1120 
 1121         if (ha->hw.link_up) {
 1122                 device_printf(ha->pci_dev, "link Up\n");
 1123         } else {
 1124                 device_printf(ha->pci_dev, "link Down\n");
 1125         }
 1126 
 1127         if (ha->hw.fduplex) {
 1128                 device_printf(ha->pci_dev, "Full Duplex\n");
 1129         } else {
 1130                 device_printf(ha->pci_dev, "Half Duplex\n");
 1131         }
 1132 
 1133         if (ha->hw.autoneg) {
 1134                 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
 1135         } else {
 1136                 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
 1137         }
 1138 
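              /*
               * 0x3E8 (1000) and 0x64 (100) suggest that link_speed is
               * reported in Mbps; 0x710 is evidently the firmware's code
               * for 10Gbps and does not follow the same pattern (an
               * observation from this switch, not a documented encoding).
               */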
 1139         switch (ha->hw.link_speed) {
 1140         case 0x710:
  1141                 device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
 1142                 break;
 1143 
 1144         case 0x3E8:
  1145                 device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
 1146                 break;
 1147 
 1148         case 0x64:
 1149                 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
 1150                 break;
 1151 
 1152         default:
 1153                 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
 1154                 break;
 1155         }
 1156 
 1157         switch (ha->hw.module_type) {
 1158         case 0x01:
 1159                 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
 1160                 break;
 1161 
 1162         case 0x02:
 1163                 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
 1164                 break;
 1165 
 1166         case 0x03:
 1167                 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
 1168                 break;
 1169 
 1170         case 0x04:
 1171                 device_printf(ha->pci_dev,
 1172                         "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
 1173                         ha->hw.cable_length);
 1174                 break;
 1175 
 1176         case 0x05:
 1177                 device_printf(ha->pci_dev, "Module Type 10GE Active"
 1178                         " Limiting Copper(Compliant)[%d m]\n",
 1179                         ha->hw.cable_length);
 1180                 break;
 1181 
 1182         case 0x06:
 1183                 device_printf(ha->pci_dev,
 1184                         "Module Type 10GE Passive Copper"
 1185                         " (Legacy, Best Effort)[%d m]\n",
 1186                         ha->hw.cable_length);
 1187                 break;
 1188 
 1189         case 0x07:
 1190                 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
 1191                 break;
 1192 
 1193         case 0x08:
 1194                 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
 1195                 break;
 1196 
 1197         case 0x09:
 1198                 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
 1199                 break;
 1200 
 1201         case 0x0A:
 1202                 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
 1203                 break;
 1204 
 1205         case 0x0B:
 1206                 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
 1207                         "(Legacy, Best Effort)\n");
 1208                 break;
 1209 
 1210         default:
 1211                 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
 1212                         ha->hw.module_type);
 1213                 break;
 1214         }
 1215 
 1216         if (ha->hw.link_faults == 1)
 1217                 device_printf(ha->pci_dev, "SFP Power Fault\n");
 1218 }
 1219 
 1220 /*
 1221  * Name: ql_free_dma
 1222  * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 1223  */
 1224 void
 1225 ql_free_dma(qla_host_t *ha)
 1226 {
 1227         uint32_t i;
 1228 
 1229         if (ha->hw.dma_buf.flags.sds_ring) {
 1230                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
 1231                         ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
 1232                 }
 1233                 ha->hw.dma_buf.flags.sds_ring = 0;
 1234         }
 1235 
 1236         if (ha->hw.dma_buf.flags.rds_ring) {
 1237                 for (i = 0; i < ha->hw.num_rds_rings; i++) {
 1238                         ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
 1239                 }
 1240                 ha->hw.dma_buf.flags.rds_ring = 0;
 1241         }
 1242 
 1243         if (ha->hw.dma_buf.flags.tx_ring) {
 1244                 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
 1245                 ha->hw.dma_buf.flags.tx_ring = 0;
 1246         }
 1247         ql_minidump_free(ha);
 1248 }
 1249 
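      /*
       * The dma_buf.flags bits cleared above make ql_free_dma() safe to
       * call more than once (for example from the ql_alloc_dma() error
       * path below): rings whose flag is already zero are skipped.
       */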
 1250 /*
 1251  * Name: ql_alloc_dma
 1252  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 1253  */
 1254 int
 1255 ql_alloc_dma(qla_host_t *ha)
 1256 {
 1257         device_t                dev;
 1258         uint32_t                i, j, size, tx_ring_size;
 1259         qla_hw_t                *hw;
 1260         qla_hw_tx_cntxt_t       *tx_cntxt;
 1261         uint8_t                 *vaddr;
 1262         bus_addr_t              paddr;
 1263 
 1264         dev = ha->pci_dev;
 1265 
 1266         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
 1267 
 1268         hw = &ha->hw;
 1269         /*
 1270          * Allocate Transmit Ring
 1271          */
 1272         tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
 1273         size = (tx_ring_size * ha->hw.num_tx_rings);
 1274 
 1275         hw->dma_buf.tx_ring.alignment = 8;
 1276         hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
 1277 
 1278         if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
 1279                 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
 1280                 goto ql_alloc_dma_exit;
 1281         }
 1282 
 1283         vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
 1284         paddr = hw->dma_buf.tx_ring.dma_addr;
 1285 
 1286         for (i = 0; i < ha->hw.num_tx_rings; i++) {
 1287                 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
 1288 
 1289                 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
 1290                 tx_cntxt->tx_ring_paddr = paddr;
 1291 
 1292                 vaddr += tx_ring_size;
 1293                 paddr += tx_ring_size;
 1294         }
 1295 
 1296         for (i = 0; i < ha->hw.num_tx_rings; i++) {
 1297                 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
 1298 
 1299                 tx_cntxt->tx_cons = (uint32_t *)vaddr;
 1300                 tx_cntxt->tx_cons_paddr = paddr;
 1301 
 1302                 vaddr += sizeof (uint32_t);
 1303                 paddr += sizeof (uint32_t);
 1304         }
 1305 
 1306         ha->hw.dma_buf.flags.tx_ring = 1;
 1307 
 1308         QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
 1309                 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
 1310                 hw->dma_buf.tx_ring.dma_b));
 1311         /*
 1312          * Allocate Receive Descriptor Rings
 1313          */
 1314 
 1315         for (i = 0; i < hw->num_rds_rings; i++) {
 1316                 hw->dma_buf.rds_ring[i].alignment = 8;
 1317                 hw->dma_buf.rds_ring[i].size =
 1318                         (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
 1319 
 1320                 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
 1321                         device_printf(dev, "%s: rds ring[%d] alloc failed\n",
 1322                                 __func__, i);
 1323 
 1324                         for (j = 0; j < i; j++)
 1325                                 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
 1326 
 1327                         goto ql_alloc_dma_exit;
 1328                 }
 1329                 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
 1330                         __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
 1331                         hw->dma_buf.rds_ring[i].dma_b));
 1332         }
 1333 
 1334         hw->dma_buf.flags.rds_ring = 1;
 1335 
 1336         /*
 1337          * Allocate Status Descriptor Rings
 1338          */
 1339 
 1340         for (i = 0; i < hw->num_sds_rings; i++) {
 1341                 hw->dma_buf.sds_ring[i].alignment = 8;
 1342                 hw->dma_buf.sds_ring[i].size =
 1343                         (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
 1344 
 1345                 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
 1346                         device_printf(dev, "%s: sds ring alloc failed\n",
 1347                                 __func__);
 1348 
 1349                         for (j = 0; j < i; j++)
 1350                                 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
 1351 
 1352                         goto ql_alloc_dma_exit;
 1353                 }
 1354                 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
 1355                         __func__, i,
 1356                         (void *)(hw->dma_buf.sds_ring[i].dma_addr),
 1357                         hw->dma_buf.sds_ring[i].dma_b));
 1358         }
 1359         for (i = 0; i < hw->num_sds_rings; i++) {
 1360                 hw->sds[i].sds_ring_base =
 1361                         (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
 1362         }
 1363 
 1364         hw->dma_buf.flags.sds_ring = 1;
 1365 
 1366         return 0;
 1367 
 1368 ql_alloc_dma_exit:
 1369         ql_free_dma(ha);
 1370         return -1;
 1371 }
 1372 
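/*
 * Layout sketch (illustrative, not part of the driver): ql_alloc_dma()
 * carves a single DMA buffer into num_tx_rings descriptor arrays laid
 * out back to back, followed by one 32-bit consumer index per ring;
 * the extra PAGE_SIZE added to the allocation guarantees room for the
 * trailing indices.  Hypothetical helpers mirroring that arithmetic:
 */
static __inline uint32_t
qla_tx_ring_off_sketch(uint32_t ring)
{
        /* each ring is NUM_TX_DESCRIPTORS descriptors long */
        return (ring * sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
}

static __inline uint32_t
qla_tx_cons_off_sketch(qla_host_t *ha, uint32_t ring)
{
        /* consumer indices start right after the last descriptor array */
        return (qla_tx_ring_off_sketch(ha->hw.num_tx_rings) +
                (ring * sizeof(uint32_t)));
}
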
 1373 #define Q8_MBX_MSEC_DELAY       5000
 1374 
 1375 static int
 1376 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
 1377         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
 1378 {
 1379         uint32_t i;
 1380         uint32_t data;
 1381         int ret = 0;
 1382         uint64_t start_usecs;
 1383         uint64_t end_usecs;
 1384         uint64_t msecs_200;
 1385 
 1386         ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);
 1387 
 1388         if (ha->offline || ha->qla_initiate_recovery) {
 1389                 ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
 1390                 goto exit_qla_mbx_cmd;
 1391         }
 1392 
 1393         if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
 1394                 (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))||
 1395                 !(ha->err_inject & ~0xFFFF))) {
 1396                 ret = -3;
 1397                 QL_INITIATE_RECOVERY(ha);
 1398                 goto exit_qla_mbx_cmd;
 1399         }
 1400 
 1401         start_usecs = qla_get_usec_timestamp();
 1402 
 1403         if (no_pause)
 1404                 i = 1000;
 1405         else
 1406                 i = Q8_MBX_MSEC_DELAY;
 1407 
 1408         while (i) {
 1409                 if (ha->qla_initiate_recovery) {
 1410                         ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
 1411                         return (-1);
 1412                 }
 1413 
 1414                 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
 1415                 if (data == 0)
 1416                         break;
 1417                 if (no_pause) {
 1418                         DELAY(1000);
 1419                 } else {
 1420                         qla_mdelay(__func__, 1);
 1421                 }
 1422                 i--;
 1423         }
 1424 
 1425         if (i == 0) {
 1426                 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
 1427                         __func__, data);
 1428                 ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
 1429                 ret = -1;
 1430                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
 1431                 QL_INITIATE_RECOVERY(ha);
 1432                 goto exit_qla_mbx_cmd;
 1433         }
 1434 
 1435         for (i = 0; i < n_hmbox; i++) {
 1436                 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
 1437                 h_mbox++;
 1438         }
 1439 
 1440         WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
 1441 
 1442         i = Q8_MBX_MSEC_DELAY;
 1443         while (i) {
 1444                 if (ha->qla_initiate_recovery) {
 1445                         ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
 1446                         return (-1);
 1447                 }
 1448 
 1449                 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
 1450 
 1451                 if ((data & 0x3) == 1) {
 1452                         data = READ_REG32(ha, Q8_FW_MBOX0);
 1453                         if ((data & 0xF000) != 0x8000)
 1454                                 break;
 1455                 }
 1456                 if (no_pause) {
 1457                         DELAY(1000);
 1458                 } else {
 1459                         qla_mdelay(__func__, 1);
 1460                 }
 1461                 i--;
 1462         }
 1463         if (i == 0) {
 1464                 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
 1465                         __func__, data);
 1466                 ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
 1467                 ret = -2;
 1468                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
 1469                 QL_INITIATE_RECOVERY(ha);
 1470                 goto exit_qla_mbx_cmd;
 1471         }
 1472 
 1473         for (i = 0; i < n_fwmbox; i++) {
 1474                 if (ha->qla_initiate_recovery) {
 1475                         ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
 1476                         return (-1);
 1477                 }
 1478 
 1479                 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
 1480         }
 1481 
 1482         WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
 1483         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
 1484 
 1485         end_usecs = qla_get_usec_timestamp();
 1486 
 1487         if (end_usecs > start_usecs) {
 1488                 msecs_200 = (end_usecs - start_usecs)/(1000 * 200);
 1489 
 1490                 if (msecs_200 < 15) 
 1491                         ha->hw.mbx_comp_msecs[msecs_200]++;
 1492                 else if (msecs_200 < 20)
 1493                         ha->hw.mbx_comp_msecs[15]++;
 1494                 else {
 1495                         device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__,
 1496                                 start_usecs, end_usecs, msecs_200);
 1497                         ha->hw.mbx_comp_msecs[16]++;
 1498                 }
 1499         }
 1500         ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
 1501 
 1502 exit_qla_mbx_cmd:
 1503         return (ret);
 1504 }
 1505 
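/*
 * Request-word sketch: the first host mailbox register packs the
 * command opcode in the low 16 bits, the inbound register count above
 * it and a command version in the top bits, as in the
 * Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29) composition in
 * qla_get_nic_partition() below.  Q8_MBX_CMD_SKETCH() is a
 * hypothetical macro mirroring that layout, not a driver define:
 */
#define Q8_MBX_CMD_SKETCH(opcode, in_cnt, ver) \
        ((opcode) | ((in_cnt) << 16) | ((ver) << 29))
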
 1506 int
 1507 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
 1508         uint32_t *num_rcvq)
 1509 {
 1510         uint32_t *mbox, err;
 1511         device_t dev = ha->pci_dev;
 1512 
 1513         bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
 1514 
 1515         mbox = ha->hw.mbox;
 1516 
 1517         mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 
 1518 
 1519         if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
 1520                 device_printf(dev, "%s: failed0\n", __func__);
 1521                 return (-1);
 1522         }
 1523         err = mbox[0] >> 25; 
 1524 
 1525         if (supports_9kb != NULL) {
 1526                 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
 1527                         *supports_9kb = 1;
 1528                 else
 1529                         *supports_9kb = 0;
 1530         }
 1531 
 1532         if (num_rcvq != NULL)
 1533                 *num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
 1534 
 1535         if ((err != 1) && (err != 0)) {
 1536                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 1537                 return (-1);
 1538         }
 1539         return 0;
 1540 }
 1541 
 1542 static int
 1543 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
 1544         uint32_t create)
 1545 {
 1546         uint32_t i, err;
 1547         device_t dev = ha->pci_dev;
 1548         q80_config_intr_t *c_intr;
 1549         q80_config_intr_rsp_t *c_intr_rsp;
 1550 
 1551         c_intr = (q80_config_intr_t *)ha->hw.mbox;
 1552         bzero(c_intr, (sizeof (q80_config_intr_t)));
 1553 
 1554         c_intr->opcode = Q8_MBX_CONFIG_INTR;
 1555 
 1556         c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
 1557         c_intr->count_version |= Q8_MBX_CMD_VERSION;
 1558 
 1559         c_intr->nentries = num_intrs;
 1560 
 1561         for (i = 0; i < num_intrs; i++) {
 1562                 if (create) {
 1563                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
 1564                         c_intr->intr[i].msix_index = start_idx + 1 + i;
 1565                 } else {
 1566                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
 1567                         c_intr->intr[i].msix_index =
 1568                                 ha->hw.intr_id[(start_idx + i)];
 1569                 }
 1570 
 1571                 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
 1572         }
 1573 
 1574         if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
 1575                 (sizeof (q80_config_intr_t) >> 2),
 1576                 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
 1577                 device_printf(dev, "%s: %s failed0\n", __func__,
 1578                         (create ? "create" : "delete"));
 1579                 return (-1);
 1580         }
 1581 
 1582         c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
 1583 
 1584         err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
 1585 
 1586         if (err) {
 1587                 device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
 1588                         (create ? "create" : "delete"), err, c_intr_rsp->nentries);
 1589 
 1590                 for (i = 0; i < c_intr_rsp->nentries; i++) {
 1591                         device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
 1592                                 __func__, i, 
 1593                                 c_intr_rsp->intr[i].status,
 1594                                 c_intr_rsp->intr[i].intr_id,
 1595                                 c_intr_rsp->intr[i].intr_src);
 1596                 }
 1597 
 1598                 return (-1);
 1599         }
 1600 
 1601         for (i = 0; ((i < num_intrs) && create); i++) {
 1602                 if (!c_intr_rsp->intr[i].status) {
 1603                         ha->hw.intr_id[(start_idx + i)] =
 1604                                 c_intr_rsp->intr[i].intr_id;
 1605                         ha->hw.intr_src[(start_idx + i)] =
 1606                                 c_intr_rsp->intr[i].intr_src;
 1607                 }
 1608         }
 1609 
 1610         return (0);
 1611 }
 1612 
 1613 /*
 1614  * Name: qla_config_rss
 1615  * Function: Configure RSS for the context/interface.
 1616  */
 1617 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
 1618                         0x8030f20c77cb2da3ULL,
 1619                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
 1620                         0x255b0ec26d5a56daULL };
 1621 
 1622 static int
 1623 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
 1624 {
 1625         q80_config_rss_t        *c_rss;
 1626         q80_config_rss_rsp_t    *c_rss_rsp;
 1627         uint32_t                err, i;
 1628         device_t                dev = ha->pci_dev;
 1629 
 1630         c_rss = (q80_config_rss_t *)ha->hw.mbox;
 1631         bzero(c_rss, (sizeof (q80_config_rss_t)));
 1632 
 1633         c_rss->opcode = Q8_MBX_CONFIG_RSS;
 1634 
 1635         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
 1636         c_rss->count_version |= Q8_MBX_CMD_VERSION;
 1637 
 1638         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
 1639                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
 1640         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
 1641         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
 1642 
 1643         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
 1644         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
 1645 
 1646         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
 1647 
 1648         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
 1649         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
 1650 
 1651         c_rss->cntxt_id = cntxt_id;
 1652 
 1653         for (i = 0; i < 5; i++) {
 1654                 c_rss->rss_key[i] = rss_key[i];
 1655         }
 1656 
 1657         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
 1658                 (sizeof (q80_config_rss_t) >> 2),
 1659                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
 1660                 device_printf(dev, "%s: failed0\n", __func__);
 1661                 return (-1);
 1662         }
 1663         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
 1664 
 1665         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
 1666 
 1667         if (err) {
 1668                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 1669                 return (-1);
 1670         }
 1671         return 0;
 1672 }
 1673 
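/*
 * Conceptual sketch only: with the configuration above the firmware
 * hashes each flow using rss_key[] and uses the low bits of the hash
 * (per Q8_MBX_RSS_INDTBL_MASK) to index the indirection table, which
 * selects the destination SDS ring.  A hypothetical software model of
 * that lookup, assuming a 128-entry table:
 */
static __inline uint8_t
qla_rss_pick_sds_sketch(uint32_t hash, const uint8_t *ind_table)
{
        /* 0x7F is an assumed mask; the real one is Q8_MBX_RSS_INDTBL_MASK */
        return (ind_table[hash & 0x7F]);
}
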
 1674 static int
 1675 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
 1676         uint16_t cntxt_id, uint8_t *ind_table)
 1677 {
 1678         q80_config_rss_ind_table_t      *c_rss_ind;
 1679         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
 1680         uint32_t                        err;
 1681         device_t                        dev = ha->pci_dev;
 1682 
 1683         if ((count > Q8_RSS_IND_TBL_SIZE) ||
 1684                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
 1685                 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
 1686                         start_idx, count);
 1687                 return (-1);
 1688         }
 1689 
 1690         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
 1691         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
 1692 
 1693         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
 1694         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
 1695         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
 1696 
 1697         c_rss_ind->start_idx = start_idx;
 1698         c_rss_ind->end_idx = start_idx + count - 1;
 1699         c_rss_ind->cntxt_id = cntxt_id;
 1700         bcopy(ind_table, c_rss_ind->ind_table, count);
 1701 
 1702         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
 1703                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
 1704                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
 1705                 device_printf(dev, "%s: failed0\n", __func__);
 1706                 return (-1);
 1707         }
 1708 
 1709         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
 1710         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
 1711 
 1712         if (err) {
 1713                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 1714                 return (-1);
 1715         }
 1716         return 0;
 1717 }
 1718 
 1719 /*
 1720  * Name: qla_config_intr_coalesce
 1721  * Function: Configure Interrupt Coalescing.
 1722  */
 1723 static int
 1724 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
 1725         int rcv)
 1726 {
 1727         q80_config_intr_coalesc_t       *intrc;
 1728         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
 1729         uint32_t                        err, i;
 1730         device_t                        dev = ha->pci_dev;
 1731 
 1732         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
 1733         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
 1734 
 1735         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
 1736         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
 1737         intrc->count_version |= Q8_MBX_CMD_VERSION;
 1738 
 1739         if (rcv) {
 1740                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
 1741                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
 1742                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
 1743         } else {
 1744                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
 1745                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
 1746                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
 1747         }
 1748 
 1749         intrc->cntxt_id = cntxt_id;
 1750 
 1751         if (tenable) {
 1752                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
 1753                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
 1754 
 1755                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
 1756                         intrc->sds_ring_mask |= (1 << i);
 1757                 }
 1758                 intrc->ms_timeout = 1000;
 1759         }
 1760 
 1761         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
 1762                 (sizeof (q80_config_intr_coalesc_t) >> 2),
 1763                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
 1764                 device_printf(dev, "%s: failed0\n", __func__);
 1765                 return (-1);
 1766         }
 1767         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
 1768 
 1769         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
 1770 
 1771         if (err) {
 1772                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 1773                 return (-1);
 1774         }
 1775 
 1776         return 0;
 1777 }
 1778 
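/*
 * Packing sketch: the rcv_intr_coalesce/xmt_intr_coalesce knobs
 * consumed above carry max_pkts in bits 0..15 and max_mswait in bits
 * 16..31 of a single 32-bit value.  Hypothetical helper (not part of
 * the driver) composing such a value:
 */
static __inline uint32_t
qla_intr_coalesce_val_sketch(uint16_t max_pkts, uint16_t max_mswait)
{
        return (((uint32_t)max_mswait << 16) | max_pkts);
}
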
 1779 /*
 1780  * Name: qla_config_mac_addr
 1781  * Function: binds a MAC address to the context/interface.
 1782  *      Can be unicast, multicast or broadcast.
 1783  */
 1784 static int
 1785 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
 1786         uint32_t num_mac)
 1787 {
 1788         q80_config_mac_addr_t           *cmac;
 1789         q80_config_mac_addr_rsp_t       *cmac_rsp;
 1790         uint32_t                        err;
 1791         device_t                        dev = ha->pci_dev;
 1792         int                             i;
 1793         uint8_t                         *mac_cpy = mac_addr;
 1794 
 1795         if (num_mac > Q8_MAX_MAC_ADDRS) {
 1796                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
 1797                         __func__, (add_mac ? "Add" : "Del"), num_mac);
 1798                 return (-1);
 1799         }
 1800 
 1801         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
 1802         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
 1803 
 1804         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
 1805         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
 1806         cmac->count_version |= Q8_MBX_CMD_VERSION;
 1807 
 1808         if (add_mac) 
 1809                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
 1810         else
 1811                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
 1812                 
 1813         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
 1814 
 1815         cmac->nmac_entries = num_mac;
 1816         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
 1817 
 1818         for (i = 0; i < num_mac; i++) {
 1819                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
 1820                 mac_addr = mac_addr + ETHER_ADDR_LEN;
 1821         }
 1822 
 1823         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
 1824                 (sizeof (q80_config_mac_addr_t) >> 2),
 1825                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
 1826                 device_printf(dev, "%s: %s failed0\n", __func__,
 1827                         (add_mac ? "Add" : "Del"));
 1828                 return (-1);
 1829         }
 1830         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
 1831 
 1832         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
 1833 
 1834         if (err) {
 1835                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
 1836                         (add_mac ? "Add" : "Del"), err);
 1837                 for (i = 0; i < num_mac; i++) {
 1838                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
 1839                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
 1840                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
 1841                         mac_cpy += ETHER_ADDR_LEN;
 1842                 }
 1843                 return (-1);
 1844         }
 1845 
 1846         return 0;
 1847 }
 1848 
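/*
 * Usage sketch (illustrative): callers hand qla_config_mac_addr() a
 * flat array of 6-byte entries.  A hypothetical wrapper adding two
 * addresses in one mailbox command:
 */
static int
qla_add_two_macs_sketch(qla_host_t *ha, uint8_t *m0, uint8_t *m1)
{
        uint8_t macs[2 * ETHER_ADDR_LEN];

        bcopy(m0, &macs[0], ETHER_ADDR_LEN);
        bcopy(m1, &macs[ETHER_ADDR_LEN], ETHER_ADDR_LEN);
        return (qla_config_mac_addr(ha, macs, 1 /* add */, 2));
}
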
 1849 /*
 1850  * Name: qla_set_mac_rcv_mode
 1851  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 1852  */
 1853 static int
 1854 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
 1855 {
 1856         q80_config_mac_rcv_mode_t       *rcv_mode;
 1857         uint32_t                        err;
 1858         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
 1859         device_t                        dev = ha->pci_dev;
 1860 
 1861         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
 1862         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
 1863 
 1864         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
 1865         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
 1866         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
 1867 
 1868         rcv_mode->mode = mode;
 1869 
 1870         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
 1871 
 1872         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
 1873                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
 1874                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
 1875                 device_printf(dev, "%s: failed0\n", __func__);
 1876                 return (-1);
 1877         }
 1878         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
 1879 
 1880         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
 1881 
 1882         if (err) {
 1883                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 1884                 return (-1);
 1885         }
 1886 
 1887         return 0;
 1888 }
 1889 
 1890 int
 1891 ql_set_promisc(qla_host_t *ha)
 1892 {
 1893         int ret;
 1894 
 1895         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
 1896         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 1897         return (ret);
 1898 }
 1899 
 1900 void
 1901 qla_reset_promisc(qla_host_t *ha)
 1902 {
 1903         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
 1904         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 1905 }
 1906 
 1907 int
 1908 ql_set_allmulti(qla_host_t *ha)
 1909 {
 1910         int ret;
 1911 
 1912         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
 1913         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 1914         return (ret);
 1915 }
 1916 
 1917 void
 1918 qla_reset_allmulti(qla_host_t *ha)
 1919 {
 1920         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
 1921         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
 1922 }
 1923 
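/*
 * Caller sketch (illustrative): an ioctl path would typically mirror
 * IFF_PROMISC into the receive mode with the helpers above; this
 * hypothetical routine is not part of the driver:
 */
static __inline int
qla_sync_promisc_sketch(qla_host_t *ha, struct ifnet *ifp)
{
        if (ifp->if_flags & IFF_PROMISC)
                return (ql_set_promisc(ha));
        qla_reset_promisc(ha);
        return (0);
}
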
 1924 /*
 1925  * Name: ql_set_max_mtu
 1926  * Function:
 1927  *      Sets the maximum transmission unit size for the specified rcv context.
 1928  */
 1929 int
 1930 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
 1931 {
 1932         device_t                dev;
 1933         q80_set_max_mtu_t       *max_mtu;
 1934         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
 1935         uint32_t                err;
 1936 
 1937         dev = ha->pci_dev;
 1938 
 1939         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
 1940         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
 1941 
 1942         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
 1943         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
 1944         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
 1945 
 1946         max_mtu->cntxt_id = cntxt_id;
 1947         max_mtu->mtu = mtu;
 1948 
 1949         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
 1950                 (sizeof (q80_set_max_mtu_t) >> 2),
 1951                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
 1952                 device_printf(dev, "%s: failed\n", __func__);
 1953                 return -1;
 1954         }
 1955 
 1956         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
 1957 
 1958         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
 1959 
 1960         if (err) {
 1961                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 1962         }
 1963 
 1964         return 0;
 1965 }
 1966 
 1967 static int
 1968 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
 1969 {
 1970         device_t                dev;
 1971         q80_link_event_t        *lnk;
 1972         q80_link_event_rsp_t    *lnk_rsp;
 1973         uint32_t                err;
 1974 
 1975         dev = ha->pci_dev;
 1976 
 1977         lnk = (q80_link_event_t *)ha->hw.mbox;
 1978         bzero(lnk, (sizeof (q80_link_event_t)));
 1979 
 1980         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
 1981         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
 1982         lnk->count_version |= Q8_MBX_CMD_VERSION;
 1983 
 1984         lnk->cntxt_id = cntxt_id;
 1985         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
 1986 
 1987         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
 1988                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
 1989                 device_printf(dev, "%s: failed\n", __func__);
 1990                 return -1;
 1991         }
 1992 
 1993         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
 1994 
 1995         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
 1996 
 1997         if (err) {
 1998                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 1999         }
 2000 
 2001         return 0;
 2002 }
 2003 
 2004 static int
 2005 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
 2006 {
 2007         device_t                dev;
 2008         q80_config_fw_lro_t     *fw_lro;
 2009         q80_config_fw_lro_rsp_t *fw_lro_rsp;
 2010         uint32_t                err;
 2011 
 2012         dev = ha->pci_dev;
 2013 
 2014         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
 2015         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
 2016 
 2017         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
 2018         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
 2019         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
 2020 
 2021         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
 2022         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
 2023 
 2024         fw_lro->cntxt_id = cntxt_id;
 2025 
 2026         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
 2027                 (sizeof (q80_config_fw_lro_t) >> 2),
 2028                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
 2029                 device_printf(dev, "%s: failed\n", __func__);
 2030                 return -1;
 2031         }
 2032 
 2033         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
 2034 
 2035         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
 2036 
 2037         if (err) {
 2038                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 2039         }
 2040 
 2041         return 0;
 2042 }
 2043 
 2044 static int
 2045 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
 2046 {
 2047         device_t                dev;
 2048         q80_hw_config_t         *hw_config;
 2049         q80_hw_config_rsp_t     *hw_config_rsp;
 2050         uint32_t                err;
 2051 
 2052         dev = ha->pci_dev;
 2053 
 2054         hw_config = (q80_hw_config_t *)ha->hw.mbox;
 2055         bzero(hw_config, sizeof (q80_hw_config_t));
 2056 
 2057         hw_config->opcode = Q8_MBX_HW_CONFIG;
 2058         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
 2059         hw_config->count_version |= Q8_MBX_CMD_VERSION;
 2060 
 2061         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
 2062 
 2063         hw_config->u.set_cam_search_mode.mode = search_mode;
 2064 
 2065         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
 2066                 (sizeof (q80_hw_config_t) >> 2),
 2067                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
 2068                 device_printf(dev, "%s: failed\n", __func__);
 2069                 return -1;
 2070         }
 2071         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
 2072 
 2073         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
 2074 
 2075         if (err) {
 2076                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 2077         }
 2078 
 2079         return 0;
 2080 }
 2081 
 2082 static int
 2083 qla_get_cam_search_mode(qla_host_t *ha)
 2084 {
 2085         device_t                dev;
 2086         q80_hw_config_t         *hw_config;
 2087         q80_hw_config_rsp_t     *hw_config_rsp;
 2088         uint32_t                err;
 2089 
 2090         dev = ha->pci_dev;
 2091 
 2092         hw_config = (q80_hw_config_t *)ha->hw.mbox;
 2093         bzero(hw_config, sizeof (q80_hw_config_t));
 2094 
 2095         hw_config->opcode = Q8_MBX_HW_CONFIG;
 2096         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
 2097         hw_config->count_version |= Q8_MBX_CMD_VERSION;
 2098 
 2099         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
 2100 
 2101         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
 2102                 (sizeof (q80_hw_config_t) >> 2),
 2103                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
 2104                 device_printf(dev, "%s: failed\n", __func__);
 2105                 return -1;
 2106         }
 2107         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
 2108 
 2109         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
 2110 
 2111         if (err) {
 2112                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 2113         } else {
 2114                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
 2115                         hw_config_rsp->u.get_cam_search_mode.mode);
 2116         }
 2117 
 2118         return 0;
 2119 }
 2120 
 2121 static int
 2122 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
 2123 {
 2124         device_t                dev;
 2125         q80_get_stats_t         *stat;
 2126         q80_get_stats_rsp_t     *stat_rsp;
 2127         uint32_t                err;
 2128 
 2129         dev = ha->pci_dev;
 2130 
 2131         stat = (q80_get_stats_t *)ha->hw.mbox;
 2132         bzero(stat, (sizeof (q80_get_stats_t)));
 2133 
 2134         stat->opcode = Q8_MBX_GET_STATS;
 2135         stat->count_version = 2;
 2136         stat->count_version |= Q8_MBX_CMD_VERSION;
 2137 
 2138         stat->cmd = cmd;
 2139 
 2140         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
 2141                 ha->hw.mbox, (rsp_size >> 2), 0)) {
 2142                 device_printf(dev, "%s: failed\n", __func__);
 2143                 return -1;
 2144         }
 2145 
 2146         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
 2147 
 2148         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
 2149 
 2150         if (err) {
 2151                 return -1;
 2152         }
 2153 
 2154         return 0;
 2155 }
 2156 
 2157 void
 2158 ql_get_stats(qla_host_t *ha)
 2159 {
 2160         q80_get_stats_rsp_t     *stat_rsp;
 2161         q80_mac_stats_t         *mstat;
 2162         q80_xmt_stats_t         *xstat;
 2163         q80_rcv_stats_t         *rstat;
 2164         uint32_t                cmd;
 2165         int                     i;
 2166         struct ifnet *ifp = ha->ifp;
 2167 
 2168         if (ifp == NULL)
 2169                 return;
 2170 
 2171         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
 2172                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
 2173                 return;
 2174         }
 2175 
 2176         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 2177                 QLA_UNLOCK(ha, __func__);
 2178                 return;
 2179         }
 2180 
 2181         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
 2182         /*
 2183          * Get MAC Statistics
 2184          */
 2185         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
 2186 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
 2187 
 2188         cmd |= ((ha->pci_func & 0x1) << 16);
 2189 
 2190         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
 2191                 ha->offline)
 2192                 goto ql_get_stats_exit;
 2193 
 2194         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
 2195                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
 2196                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
 2197         } else {
 2198                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
 2199                         __func__, ha->hw.mbox[0]);
 2200         }
 2201         /*
 2202          * Get RCV Statistics
 2203          */
 2204         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
 2205 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
 2206         cmd |= (ha->hw.rcv_cntxt_id << 16);
 2207 
 2208         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
 2209                 ha->offline)
 2210                 goto ql_get_stats_exit;
 2211 
 2212         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
 2213                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
 2214                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
 2215         } else {
 2216                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
 2217                         __func__, ha->hw.mbox[0]);
 2218         }
 2219 
 2220         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
 2221                 ha->offline)
 2222                 goto ql_get_stats_exit;
 2223         /*
 2224          * Get XMT Statistics
 2225          */
 2226         for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
 2227                 if (ha->qla_watchdog_pause ||
 2228                         (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
 2229                         ha->offline)
 2230                         goto ql_get_stats_exit;
 2231 
 2232                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
 2233 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
 2234                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
 2235 
 2236                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
 2237                         == 0) {
 2238                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
 2239                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
 2240                 } else {
 2241                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
 2242                                 __func__, ha->hw.mbox[0]);
 2243                 }
 2244         }
 2245 
 2246 ql_get_stats_exit:
 2247         QLA_UNLOCK(ha, __func__);
 2248 
 2249         return;
 2250 }
 2251 
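/*
 * Command-word sketch: the Q8_GET_STATS_CMD_* requests above place the
 * target selector (PCI function or context id) in bits 16..31.
 * Hypothetical helper mirroring that packing:
 */
static __inline uint32_t
qla_stats_cmd_sketch(uint32_t type, uint16_t id)
{
        return (type | ((uint32_t)id << 16));
}
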
 2252 /*
 2253  * Name: qla_tx_tso
 2254  * Function: Checks if the packet to be transmitted is a candidate for
 2255  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 2256  *      Ring Structure are plugged in.
 2257  */
 2258 static int
 2259 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
 2260 {
 2261         struct ether_vlan_header *eh;
 2262         struct ip *ip = NULL;
 2263         struct ip6_hdr *ip6 = NULL;
 2264         struct tcphdr *th = NULL;
 2265         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
 2266         uint16_t etype, opcode, offload = 1;
 2267 
 2268         eh = mtod(mp, struct ether_vlan_header *);
 2269 
 2270         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 2271                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2272                 etype = ntohs(eh->evl_proto);
 2273         } else {
 2274                 ehdrlen = ETHER_HDR_LEN;
 2275                 etype = ntohs(eh->evl_encap_proto);
 2276         }
 2277 
 2278         hdrlen = 0;
 2279 
 2280         switch (etype) {
 2281                 case ETHERTYPE_IP:
 2282 
 2283                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
 2284                                         sizeof(struct tcphdr);
 2285 
 2286                         if (mp->m_len < tcp_opt_off) {
 2287                                 m_copydata(mp, 0, tcp_opt_off, hdr);
 2288                                 ip = (struct ip *)(hdr + ehdrlen);
 2289                         } else {
 2290                                 ip = (struct ip *)(mp->m_data + ehdrlen);
 2291                         }
 2292 
 2293                         ip_hlen = ip->ip_hl << 2;
 2294                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
 2295 
 2296                                 
 2297                         if ((ip->ip_p != IPPROTO_TCP) ||
 2298                                 (ip_hlen != sizeof (struct ip))){
 2299                                 /* IP Options are not supported */
 2300 
 2301                                 offload = 0;
 2302                         } else
 2303                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
 2304 
 2305                 break;
 2306 
 2307                 case ETHERTYPE_IPV6:
 2308 
 2309                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
 2310                                         sizeof (struct tcphdr);
 2311 
 2312                         if (mp->m_len < tcp_opt_off) {
 2313                                 m_copydata(mp, 0, tcp_opt_off, hdr);
 2314                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
 2315                         } else {
 2316                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 2317                         }
 2318 
 2319                         ip_hlen = sizeof(struct ip6_hdr);
 2320                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
 2321 
 2322                         if (ip6->ip6_nxt != IPPROTO_TCP) {
 2323                                 //device_printf(dev, "%s: ipv6\n", __func__);
 2324                                 offload = 0;
 2325                         } else
 2326                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
 2327                 break;
 2328 
 2329                 default:
 2330                         QL_DPRINT8(ha, (ha->pci_dev, "%s: type!=ip\n", __func__));
 2331                         offload = 0;
 2332                 break;
 2333         }
 2334 
 2335         if (!offload)
 2336                 return (-1);
 2337 
 2338         tcp_hlen = th->th_off << 2;
 2339         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
 2340 
 2341         if (mp->m_len < hdrlen) {
 2342                 if (mp->m_len < tcp_opt_off) {
 2343                         if (tcp_hlen > sizeof(struct tcphdr)) {
 2344                                 m_copydata(mp, tcp_opt_off,
 2345                                         (tcp_hlen - sizeof(struct tcphdr)),
 2346                                         &hdr[tcp_opt_off]);
 2347                         }
 2348                 } else {
 2349                         m_copydata(mp, 0, hdrlen, hdr);
 2350                 }
 2351         }
 2352 
 2353         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
 2354 
 2355         tx_cmd->flags_opcode = opcode;
 2356         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
 2357         tx_cmd->total_hdr_len = hdrlen;
 2358 
 2359         /* Check for multicast: least significant bit of first address byte == 1 */
 2360         if (eh->evl_dhost[0] & 0x01) {
 2361                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
 2362         }
 2363 
 2364         if (mp->m_len < hdrlen) {
 2365                 printf("%d\n", hdrlen);
 2366                 return (1);
 2367         }
 2368 
 2369         return (0);
 2370 }
 2371 
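/*
 * Illustrative only: the values parsed above originate in the mbuf
 * packet header.  A hypothetical sender marking a frame for TSO would
 * set them roughly as follows (field names from sys/mbuf.h):
 */
static __inline void
qla_mark_tso_sketch(struct mbuf *mp, u_short mss)
{
        mp->m_pkthdr.csum_flags |= CSUM_TSO;    /* routes to qla_tx_tso() */
        mp->m_pkthdr.tso_segsz = mss;           /* becomes tx_cmd->mss */
}
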
 2372 /*
 2373  * Name: qla_tx_chksum
 2374  * Function: Checks if the packet to be transmitted is a candidate for
 2375  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 2376  *      Ring Structure are plugged in.
 2377  */
 2378 static int
 2379 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
 2380         uint32_t *tcp_hdr_off)
 2381 {
 2382         struct ether_vlan_header *eh;
 2383         struct ip *ip;
 2384         struct ip6_hdr *ip6;
 2385         uint32_t ehdrlen, ip_hlen;
 2386         uint16_t etype, opcode, offload = 1;
 2387         uint8_t buf[sizeof(struct ip6_hdr)];
 2388 
 2389         *op_code = 0;
 2390 
 2391         if ((mp->m_pkthdr.csum_flags &
 2392                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
 2393                 return (-1);
 2394 
 2395         eh = mtod(mp, struct ether_vlan_header *);
 2396 
 2397         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 2398                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 2399                 etype = ntohs(eh->evl_proto);
 2400         } else {
 2401                 ehdrlen = ETHER_HDR_LEN;
 2402                 etype = ntohs(eh->evl_encap_proto);
 2403         }
 2404 
 2405                 
 2406         switch (etype) {
 2407                 case ETHERTYPE_IP:
 2408                         ip = (struct ip *)(mp->m_data + ehdrlen);
 2409 
 2410                         ip_hlen = sizeof (struct ip);
 2411 
 2412                         if (mp->m_len < (ehdrlen + ip_hlen)) {
 2413                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
 2414                                 ip = (struct ip *)buf;
 2415                         }
 2416 
 2417                         if (ip->ip_p == IPPROTO_TCP)
 2418                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
 2419                         else if (ip->ip_p == IPPROTO_UDP)
 2420                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
 2421                         else {
 2422                                 //device_printf(dev, "%s: ipv4\n", __func__);
 2423                                 offload = 0;
 2424                         }
 2425                 break;
 2426 
 2427                 case ETHERTYPE_IPV6:
 2428                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 2429 
 2430                         ip_hlen = sizeof(struct ip6_hdr);
 2431 
 2432                         if (mp->m_len < (ehdrlen + ip_hlen)) {
 2433                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
 2434                                         buf);
 2435                                 ip6 = (struct ip6_hdr *)buf;
 2436                         }
 2437 
 2438                         if (ip6->ip6_nxt == IPPROTO_TCP)
 2439                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
 2440                         else if (ip6->ip6_nxt == IPPROTO_UDP)
 2441                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
 2442                         else {
 2443                                 //device_printf(dev, "%s: ipv6\n", __func__);
 2444                                 offload = 0;
 2445                         }
 2446                 break;
 2447 
 2448                 default:
 2449                         offload = 0;
 2450                 break;
 2451         }
 2452         if (!offload)
 2453                 return (-1);
 2454 
 2455         *op_code = opcode;
 2456         *tcp_hdr_off = (ip_hlen + ehdrlen);
 2457 
 2458         return (0);
 2459 }
 2460 
 2461 #define QLA_TX_MIN_FREE 2
 2462 /*
 2463  * Name: ql_hw_send
 2464  * Function: Transmits a packet. It first checks if the packet is a
 2465  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 2466  *      offload. If neither of these criteria is met, it is transmitted
 2467  *      as a regular ethernet frame.
 2468  */
 2469 int
 2470 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
 2471         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
 2472 {
 2473         struct ether_vlan_header *eh;
 2474         qla_hw_t *hw = &ha->hw;
 2475         q80_tx_cmd_t *tx_cmd, tso_cmd;
 2476         bus_dma_segment_t *c_seg;
 2477         uint32_t num_tx_cmds, hdr_len = 0;
 2478         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
 2479         device_t dev;
 2480         int i, ret;
 2481         uint8_t *src = NULL, *dst = NULL;
 2482         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
 2483         uint32_t op_code = 0;
 2484         uint32_t tcp_hdr_off = 0;
 2485 
 2486         dev = ha->pci_dev;
 2487 
 2488         /*
 2489          * Always make sure there is at least one empty slot in the tx_ring;
 2490          * the tx_ring is considered full when only one entry is available.
 2491          */
 2492         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
 2493 
 2494         total_length = mp->m_pkthdr.len;
 2495         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
 2496                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
 2497                         __func__, total_length);
 2498                 return (EINVAL);
 2499         }
 2500         eh = mtod(mp, struct ether_vlan_header *);
 2501 
 2502         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
 2503                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
 2504 
 2505                 src = frame_hdr;
 2506                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
 2507 
 2508                 if (!(ret & ~1)) {
 2509                         /* find the additional tx_cmd descriptors required */
 2510 
 2511                         if (mp->m_flags & M_VLANTAG)
 2512                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
 2513 
 2514                         hdr_len = tso_cmd.total_hdr_len;
 2515 
 2516                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
 2517                         bytes = QL_MIN(bytes, hdr_len);
 2518 
 2519                         num_tx_cmds++;
 2520                         hdr_len -= bytes;
 2521 
 2522                         while (hdr_len) {
 2523                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
 2524                                 hdr_len -= bytes;
 2525                                 num_tx_cmds++;
 2526                         }
 2527                         hdr_len = tso_cmd.total_hdr_len;
 2528 
 2529                         if (ret == 0)
 2530                                 src = (uint8_t *)eh;
 2531                 } else 
 2532                         return (EINVAL);
 2533         } else {
 2534                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
 2535         }
 2536 
 2537         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
 2538                 ql_hw_tx_done_locked(ha, txr_idx);
 2539                 if (hw->tx_cntxt[txr_idx].txr_free <=
 2540                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
 2541                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
 2542                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
 2543                                 __func__));
 2544                         return (-1);
 2545                 }
 2546         }
 2547 
 2548         for (i = 0; i < num_tx_cmds; i++) {
 2549                 int j;
 2550 
 2551                 j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
 2552 
 2553                 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
 2554                         QL_ASSERT(ha, 0, \
 2555                                 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
 2556                                 __func__, __LINE__, txr_idx, j,\
 2557                                 ha->tx_ring[txr_idx].tx_buf[j].m_head));
 2558                         return (EINVAL);
 2559                 }
 2560         }
 2561 
 2562         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
 2563 
 2564         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
 2565                 if (nsegs > ha->hw.max_tx_segs)
 2566                         ha->hw.max_tx_segs = nsegs;
 2567 
 2568                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
 2569 
 2570                 if (op_code) {
 2571                         tx_cmd->flags_opcode = op_code;
 2572                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
 2573 
 2574                 } else {
 2575                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
 2576                 }
 2577         } else {
 2578                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
 2579                 ha->tx_tso_frames++;
 2580         }
 2581 
 2582         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 2583                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
 2584 
 2585                 if (iscsi_pdu)
 2586                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
 2587 
 2588         } else if (mp->m_flags & M_VLANTAG) {
 2589                 if (hdr_len) { /* TSO */
 2590                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
 2591                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
 2592                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
 2593                 } else
 2594                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
 2595 
 2596                 ha->hw_vlan_tx_frames++;
 2597                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
 2598 
 2599                 if (iscsi_pdu) {
 2600                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
 2601                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
 2602                 }
 2603         }
 2604 
 2605         tx_cmd->n_bufs = (uint8_t)nsegs;
 2606         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
 2607         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
 2608         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
 2609 
 2610         c_seg = segs;
 2611 
 2612         while (1) {
 2613                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
 2614                         switch (i) {
 2615                         case 0:
 2616                                 tx_cmd->buf1_addr = c_seg->ds_addr;
 2617                                 tx_cmd->buf1_len = c_seg->ds_len;
 2618                                 break;
 2619 
 2620                         case 1:
 2621                                 tx_cmd->buf2_addr = c_seg->ds_addr;
 2622                                 tx_cmd->buf2_len = c_seg->ds_len;
 2623                                 break;
 2624 
 2625                         case 2:
 2626                                 tx_cmd->buf3_addr = c_seg->ds_addr;
 2627                                 tx_cmd->buf3_len = c_seg->ds_len;
 2628                                 break;
 2629 
 2630                         case 3:
 2631                                 tx_cmd->buf4_addr = c_seg->ds_addr;
 2632                                 tx_cmd->buf4_len = c_seg->ds_len;
 2633                                 break;
 2634                         }
 2635 
 2636                         c_seg++;
 2637                         nsegs--;
 2638                 }
 2639 
 2640                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
 2641                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
 2642                                 (NUM_TX_DESCRIPTORS - 1);
 2643                 tx_cmd_count++;
 2644 
 2645                 if (!nsegs)
 2646                         break;
 2647                 
 2648                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
 2649                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
 2650         }
 2651 
 2652         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
 2653                 /* TSO : Copy the header in the following tx cmd descriptors */
 2654 
 2655                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
 2656 
 2657                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
 2658                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
 2659 
 2660                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
 2661                 bytes = QL_MIN(bytes, hdr_len);
 2662 
 2663                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
 2664 
 2665                 if (mp->m_flags & M_VLANTAG) {
 2666                         /* first copy the src/dst MAC addresses */
 2667                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
 2668                         dst += (ETHER_ADDR_LEN * 2);
 2669                         src += (ETHER_ADDR_LEN * 2);
 2670                         
 2671                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
 2672                         dst += 2;
 2673                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
 2674                         dst += 2;
 2675 
 2676                         /* bytes left in src header */
 2677                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
 2678                                         ETHER_VLAN_ENCAP_LEN);
 2679 
 2680                         /* bytes left in TxCmd Entry */
 2681                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
 2682 
 2683                         bcopy(src, dst, bytes);
 2684                         src += bytes;
 2685                         hdr_len -= bytes;
 2686                 } else {
 2687                         bcopy(src, dst, bytes);
 2688                         src += bytes;
 2689                         hdr_len -= bytes;
 2690                 }
 2691 
 2692                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
 2693                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
 2694                                         (NUM_TX_DESCRIPTORS - 1);
 2695                 tx_cmd_count++;
 2696                 
 2697                 while (hdr_len) {
 2698                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
 2699                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
 2700 
 2701                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
 2702 
 2703                         bcopy(src, tx_cmd, bytes);
 2704                         src += bytes;
 2705                         hdr_len -= bytes;
 2706 
 2707                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
 2708                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
 2709                                         (NUM_TX_DESCRIPTORS - 1);
 2710                         tx_cmd_count++;
 2711                 }
 2712         }
 2713 
 2714         hw->tx_cntxt[txr_idx].txr_free =
 2715                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
 2716 
 2717         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
 2718                 txr_idx);
 2719         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
 2720 
 2721         return (0);
 2722 }
 2723 
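/*
 * Descriptor-accounting sketch (illustrative): a TSO send in
 * ql_hw_send() consumes ceil(nsegs / Q8_TX_CMD_MAX_SEGMENTS) data
 * descriptors plus extra ones for the copied header -- the first
 * carries sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN header bytes and
 * each subsequent one a full sizeof(q80_tx_cmd_t).  Hypothetical
 * helper mirroring that count:
 */
static __inline uint32_t
qla_tso_tx_cmds_sketch(uint32_t nsegs, uint32_t hdr_len)
{
        uint32_t cnt, bytes;

        cnt = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) /
                Q8_TX_CMD_MAX_SEGMENTS;

        bytes = QL_MIN(sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN, hdr_len);
        cnt++;
        hdr_len -= bytes;

        while (hdr_len) {
                bytes = QL_MIN(sizeof(q80_tx_cmd_t), hdr_len);
                hdr_len -= bytes;
                cnt++;
        }
        return (cnt);
}
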
 2724 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
 2725 static int
 2726 qla_config_rss_ind_table(qla_host_t *ha)
 2727 {
 2728         uint32_t i, count;
 2729         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
 2730 
 2731         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
 2732                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
 2733         }
 2734 
 2735         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
 2736                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
 2737                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
 2738                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
 2739                 } else {
 2740                         count = Q8_CONFIG_IND_TBL_SIZE;
 2741                 }
 2742 
 2743                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
 2744                         rss_ind_tbl))
 2745                         return (-1);
 2746         }
 2747 
 2748         return (0);
 2749 }
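       /*
        * Illustrative sketch, not driver code: the function above programs
        * all Q8_RSS_IND_TBL_MAX_IDX + 1 firmware indirection entries in
        * chunks of Q8_CONFIG_IND_TBL_SIZE, replicating one 32-entry pattern
        * that spreads flows round-robin across the SDS rings. With
        * hypothetical names (program() stands in for qla_set_rss_ind_table):
        */
       #if 0
               for (i = 0; i < tbl_size; i++)  /* build one chunk */
                       tbl[i] = i % num_rings;

               i = 0;
               while (i <= max_idx) {          /* push it repeatedly */
                       count = QL_MIN(tbl_size, max_idx - i + 1);
                       if (program(ha, i, count, tbl))
                               return (-1);
                       i += count;
               }
       #endif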
 2750 
 2751 static int
 2752 qla_config_soft_lro(qla_host_t *ha)
 2753 {
 2754 #if defined(INET) || defined(INET6)
 2755         int i;
 2756         qla_hw_t *hw = &ha->hw;
 2757         struct lro_ctrl *lro;
 2758 
 2759         for (i = 0; i < hw->num_sds_rings; i++) {
 2760                 lro = &hw->sds[i].lro;
 2761 
 2762                 bzero(lro, sizeof(struct lro_ctrl));
 2763 
 2764 #if (__FreeBSD_version >= 1100101)
 2765                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
 2766                         device_printf(ha->pci_dev,
 2767                                 "%s: tcp_lro_init_args [%d] failed\n",
 2768                                 __func__, i);
 2769                         return (-1);
 2770                 }
 2771 #else
 2772                 if (tcp_lro_init(lro)) {
 2773                         device_printf(ha->pci_dev,
 2774                                 "%s: tcp_lro_init [%d] failed\n",
 2775                                 __func__, i);
 2776                         return (-1);
 2777                 }
 2778 #endif /* #if (__FreeBSD_version >= 1100101) */
 2779 
 2780                 lro->ifp = ha->ifp;
 2781         }
 2782 
 2783         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
 2784 #endif
 2785         return (0);
 2786 }
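       /*
        * Illustrative sketch, not driver code: the lro_ctrl state
        * initialized above is presumably consumed on the receive path
        * (which lives elsewhere in this driver): frames are offered to
        * tcp_lro_rx() and handed to if_input() only when LRO declines
        * them, and aggregated flows are pushed up with tcp_lro_flush_all().
        */
       #if 0
               if (tcp_lro_rx(lro, mp, 0) != 0)
                       (*ha->ifp->if_input)(ha->ifp, mp); /* LRO declined */

               /* and once per interrupt burst: */
               tcp_lro_flush_all(lro);
       #endif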
 2787 
 2788 static void
 2789 qla_drain_soft_lro(qla_host_t *ha)
 2790 {
 2791 #if defined(INET) || defined(INET6)
 2792         int i;
 2793         qla_hw_t *hw = &ha->hw;
 2794         struct lro_ctrl *lro;
 2795 
 2796         for (i = 0; i < hw->num_sds_rings; i++) {
 2797                 lro = &hw->sds[i].lro;
 2798 
 2799 #if (__FreeBSD_version >= 1100101)
 2800                 tcp_lro_flush_all(lro);
 2801 #else
 2802                 struct lro_entry *queued;
 2803 
 2804                 while ((!SLIST_EMPTY(&lro->lro_active))) {
 2805                         queued = SLIST_FIRST(&lro->lro_active);
 2806                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
 2807                         tcp_lro_flush(lro, queued);
 2808                 }
 2809 #endif /* #if (__FreeBSD_version >= 1100101) */
 2810         }
 2811 #endif
 2812 
 2813         return;
 2814 }
 2815 
 2816 static void
 2817 qla_free_soft_lro(qla_host_t *ha)
 2818 {
 2819 #if defined(INET) || defined(INET6)
 2820         int i;
 2821         qla_hw_t *hw = &ha->hw;
 2822         struct lro_ctrl *lro;
 2823 
 2824         for (i = 0; i < hw->num_sds_rings; i++) {
 2825                 lro = &hw->sds[i].lro;
 2826                 tcp_lro_free(lro);
 2827         }
 2828 #endif
 2829 
 2830         return;
 2831 }
 2832 
 2833 /*
 2834  * Name: ql_del_hw_if
  2835  * Function: Destroys the hardware-specific entities corresponding to an
 2836  *      Ethernet Interface
 2837  */
 2838 void
 2839 ql_del_hw_if(qla_host_t *ha)
 2840 {
 2841         uint32_t i;
 2842         uint32_t num_msix;
 2843 
 2844         (void)qla_stop_nic_func(ha);
 2845 
 2846         qla_del_rcv_cntxt(ha);
 2847 
 2848         if(qla_del_xmt_cntxt(ha))
 2849                 goto ql_del_hw_if_exit;
 2850 
 2851         if (ha->hw.flags.init_intr_cnxt) {
 2852                 for (i = 0; i < ha->hw.num_sds_rings; ) {
 2853                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
 2854                                 num_msix = Q8_MAX_INTR_VECTORS;
 2855                         else
 2856                                 num_msix = ha->hw.num_sds_rings - i;
 2857 
 2858                         if (qla_config_intr_cntxt(ha, i, num_msix, 0))
 2859                                 break;
 2860 
 2861                         i += num_msix;
 2862                 }
 2863 
 2864                 ha->hw.flags.init_intr_cnxt = 0;
 2865         }
 2866 
 2867 ql_del_hw_if_exit:
 2868         if (ha->hw.enable_soft_lro) {
 2869                 qla_drain_soft_lro(ha);
 2870                 qla_free_soft_lro(ha);
 2871         }
 2872 
 2873         return;
 2874 }
 2875 
 2876 void
 2877 qla_confirm_9kb_enable(qla_host_t *ha)
 2878 {
 2879 //      uint32_t supports_9kb = 0;
 2880 
 2881         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
 2882 
 2883         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
 2884         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
 2885         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
 2886 
 2887 #if 0
 2888         qla_get_nic_partition(ha, &supports_9kb, NULL);
 2889 
 2890         if (!supports_9kb)
 2891 #endif
 2892         ha->hw.enable_9kb = 0;
 2893 
 2894         return;
 2895 }
 2896 
 2897 /*
 2898  * Name: ql_init_hw_if
  2899  * Function: Creates the hardware-specific entities corresponding to an
 2900  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 2901  *      corresponding to the interface. Enables LRO if allowed.
 2902  */
 2903 int
 2904 ql_init_hw_if(qla_host_t *ha)
 2905 {
 2906         uint32_t        i;
 2907         uint8_t         bcast_mac[6];
 2908         qla_rdesc_t     *rdesc;
 2909         uint32_t        num_msix;
 2910 
 2911         for (i = 0; i < ha->hw.num_sds_rings; i++) {
 2912                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
 2913                         ha->hw.dma_buf.sds_ring[i].size);
 2914         }
 2915 
 2916         for (i = 0; i < ha->hw.num_sds_rings; ) {
 2917                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
 2918                         num_msix = Q8_MAX_INTR_VECTORS;
 2919                 else
 2920                         num_msix = ha->hw.num_sds_rings - i;
 2921 
 2922                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
 2923                         if (i > 0) {
 2924                                 num_msix = i;
 2925 
 2926                                 for (i = 0; i < num_msix; ) {
 2927                                         qla_config_intr_cntxt(ha, i,
 2928                                                 Q8_MAX_INTR_VECTORS, 0);
 2929                                         i += Q8_MAX_INTR_VECTORS;
 2930                                 }
 2931                         }
 2932                         return (-1);
 2933                 }
 2934 
 2935                 i = i + num_msix;
 2936         }
 2937 
 2938         ha->hw.flags.init_intr_cnxt = 1;
 2939 
 2940         /*
 2941          * Create Receive Context
 2942          */
 2943         if (qla_init_rcv_cntxt(ha)) {
 2944                 return (-1);
 2945         }
 2946 
 2947         for (i = 0; i < ha->hw.num_rds_rings; i++) {
 2948                 rdesc = &ha->hw.rds[i];
 2949                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
 2950                 rdesc->rx_in = 0;
 2951                 /* Update the RDS Producer Indices */
 2952                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
 2953                         rdesc->rx_next);
 2954         }
 2955 
 2956         /*
 2957          * Create Transmit Context
 2958          */
 2959         if (qla_init_xmt_cntxt(ha)) {
 2960                 qla_del_rcv_cntxt(ha);
 2961                 return (-1);
 2962         }
 2963         ha->hw.max_tx_segs = 0;
 2964 
 2965         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
 2966                 return(-1);
 2967 
 2968         ha->hw.flags.unicast_mac = 1;
 2969 
 2970         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
 2971         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
 2972 
 2973         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
 2974                 return (-1);
 2975 
 2976         ha->hw.flags.bcast_mac = 1;
 2977 
 2978         /*
 2979          * program any cached multicast addresses
 2980          */
 2981         if (qla_hw_add_all_mcast(ha))
 2982                 return (-1);
 2983 
 2984         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
 2985                 return (-1);
 2986 
 2987         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
 2988                 return (-1);
 2989 
 2990         if (qla_config_rss_ind_table(ha))
 2991                 return (-1);
 2992 
 2993         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
 2994                 return (-1);
 2995 
 2996         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
 2997                 return (-1);
 2998 
 2999         if (ha->ifp->if_capenable & IFCAP_LRO) {
 3000                 if (ha->hw.enable_hw_lro) {
 3001                         ha->hw.enable_soft_lro = 0;
 3002 
 3003                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
 3004                                 return (-1);
 3005                 } else {
 3006                         ha->hw.enable_soft_lro = 1;
 3007 
 3008                         if (qla_config_soft_lro(ha))
 3009                                 return (-1);
 3010                 }
 3011         }
 3012 
 3013         if (qla_init_nic_func(ha))
 3014                 return (-1);
 3015 
 3016         if (qla_query_fw_dcbx_caps(ha))
 3017                 return (-1);
 3018 
 3019         for (i = 0; i < ha->hw.num_sds_rings; i++)
 3020                 QL_ENABLE_INTERRUPTS(ha, i);
 3021 
 3022         return (0);
 3023 }
 3024 
 3025 static int
 3026 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
 3027 {
 3028         device_t                dev = ha->pci_dev;
 3029         q80_rq_map_sds_to_rds_t *map_rings;
 3030         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
 3031         uint32_t                i, err;
 3032         qla_hw_t                *hw = &ha->hw;
 3033 
 3034         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
 3035         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
 3036 
 3037         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
 3038         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
 3039         map_rings->count_version |= Q8_MBX_CMD_VERSION;
 3040 
 3041         map_rings->cntxt_id = hw->rcv_cntxt_id;
 3042         map_rings->num_rings = num_idx;
 3043 
 3044         for (i = 0; i < num_idx; i++) {
 3045                 map_rings->sds_rds[i].sds_ring = i + start_idx;
 3046                 map_rings->sds_rds[i].rds_ring = i + start_idx;
 3047         }
 3048 
 3049         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
 3050                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
  3051                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
 3052                 device_printf(dev, "%s: failed0\n", __func__);
 3053                 return (-1);
 3054         }
 3055 
 3056         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
 3057 
 3058         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
 3059 
 3060         if (err) {
 3061                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3062                 return (-1);
 3063         }
 3064 
 3065         return (0);
 3066 }
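       /*
        * Illustrative sketch, not driver code: qla_map_sds_to_rds() follows
        * the mailbox convention used by every command in this file: the
        * request is built in-place in ha->hw.mbox, lengths are passed in
        * 32-bit words (hence the ">> 2"), and the response status is
        * extracted with Q8_MBX_RSP_STATUS(). With hypothetical req_t/rsp_t
        * types and OPCODE, the skeleton is:
        */
       #if 0
               req = (req_t *)ha->hw.mbox;
               bzero(req, sizeof(req_t));
               req->opcode = OPCODE;
               req->count_version = (sizeof(req_t) >> 2) | Q8_MBX_CMD_VERSION;
               /* ... opcode-specific fields ... */

               if (qla_mbx_cmd(ha, (uint32_t *)req, (sizeof(req_t) >> 2),
                       ha->hw.mbox, (sizeof(rsp_t) >> 2), 0))
                       return (-1);    /* mailbox transport failed */

               rsp = (rsp_t *)ha->hw.mbox;
               if (Q8_MBX_RSP_STATUS(rsp->regcnt_status))
                       return (-1);    /* firmware returned an error */
       #endif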
 3067 
 3068 /*
 3069  * Name: qla_init_rcv_cntxt
 3070  * Function: Creates the Receive Context.
 3071  */
 3072 static int
 3073 qla_init_rcv_cntxt(qla_host_t *ha)
 3074 {
 3075         q80_rq_rcv_cntxt_t      *rcntxt;
 3076         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
 3077         q80_stat_desc_t         *sdesc;
 3078         int                     i, j;
 3079         qla_hw_t                *hw = &ha->hw;
 3080         device_t                dev;
 3081         uint32_t                err;
 3082         uint32_t                rcntxt_sds_rings;
 3083         uint32_t                rcntxt_rds_rings;
 3084         uint32_t                max_idx;
 3085 
 3086         dev = ha->pci_dev;
 3087 
 3088         /*
 3089          * Create Receive Context
 3090          */
 3091 
 3092         for (i = 0; i < hw->num_sds_rings; i++) {
 3093                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
 3094 
 3095                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
  3096                         sdesc[j].data[0] = 1ULL; /* mark each descriptor */
  3097                         sdesc[j].data[1] = 1ULL;
 3098                 }
 3099         }
 3100 
 3101         rcntxt_sds_rings = hw->num_sds_rings;
 3102         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
 3103                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
 3104 
 3105         rcntxt_rds_rings = hw->num_rds_rings;
 3106 
 3107         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
 3108                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
 3109 
 3110         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
 3111         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
 3112 
 3113         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
 3114         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
 3115         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
 3116 
 3117         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
 3118                         Q8_RCV_CNTXT_CAP0_LRO |
 3119                         Q8_RCV_CNTXT_CAP0_HW_LRO |
 3120                         Q8_RCV_CNTXT_CAP0_RSS |
 3121                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
 3122 
 3123         if (ha->hw.enable_9kb)
 3124                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
 3125         else
 3126                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
 3127 
 3128         if (ha->hw.num_rds_rings > 1) {
 3129                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
 3130                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
 3131         } else
 3132                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
 3133 
 3134         rcntxt->nsds_rings = rcntxt_sds_rings;
 3135 
 3136         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
 3137 
 3138         rcntxt->rcv_vpid = 0;
 3139 
 3140         for (i = 0; i <  rcntxt_sds_rings; i++) {
 3141                 rcntxt->sds[i].paddr =
 3142                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
 3143                 rcntxt->sds[i].size =
 3144                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
 3145                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
 3146                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
 3147         }
 3148 
 3149         for (i = 0; i <  rcntxt_rds_rings; i++) {
 3150                 rcntxt->rds[i].paddr_std =
 3151                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
 3152 
 3153                 if (ha->hw.enable_9kb)
 3154                         rcntxt->rds[i].std_bsize =
 3155                                 qla_host_to_le64(MJUM9BYTES);
 3156                 else
 3157                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
 3158 
 3159                 rcntxt->rds[i].std_nentries =
 3160                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
 3161         }
 3162 
 3163         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
 3164                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
 3165                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
 3166                 device_printf(dev, "%s: failed0\n", __func__);
 3167                 return (-1);
 3168         }
 3169 
 3170         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
 3171 
 3172         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
 3173 
 3174         if (err) {
 3175                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3176                 return (-1);
 3177         }
 3178 
 3179         for (i = 0; i <  rcntxt_sds_rings; i++) {
 3180                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
 3181         }
 3182 
 3183         for (i = 0; i <  rcntxt_rds_rings; i++) {
 3184                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
 3185         }
 3186 
 3187         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
 3188 
 3189         ha->hw.flags.init_rx_cnxt = 1;
 3190 
 3191         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
 3192                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
 3193                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
 3194                                 max_idx = MAX_RCNTXT_SDS_RINGS;
 3195                         else
 3196                                 max_idx = hw->num_sds_rings - i;
 3197 
 3198                         err = qla_add_rcv_rings(ha, i, max_idx);
 3199                         if (err)
 3200                                 return -1;
 3201 
 3202                         i += max_idx;
 3203                 }
 3204         }
 3205 
 3206         if (hw->num_rds_rings > 1) {
 3207                 for (i = 0; i < hw->num_rds_rings; ) {
 3208                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
 3209                                 max_idx = MAX_SDS_TO_RDS_MAP;
 3210                         else
 3211                                 max_idx = hw->num_rds_rings - i;
 3212 
 3213                         err = qla_map_sds_to_rds(ha, i, max_idx);
 3214                         if (err)
 3215                                 return -1;
 3216 
 3217                         i += max_idx;
 3218                 }
 3219         }
 3220 
 3221         return (0);
 3222 }
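       /*
        * Illustrative sketch, not driver code: the tail of
        * qla_init_rcv_cntxt() shows the chunking pattern used when a
        * resource count exceeds what one mailbox command can carry: create
        * the context with the first batch, then attach (or map) the
        * remainder batch_max entries at a time. With hypothetical names:
        */
       #if 0
               i = first;
               while (i < total) {
                       uint32_t n = QL_MIN(batch_max, total - i);

                       if (attach(ha, i, n))   /* qla_add_rcv_rings() or */
                               return (-1);    /* qla_map_sds_to_rds()   */
                       i += n;
               }
       #endif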
 3223 
 3224 static int
 3225 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
 3226 {
 3227         device_t                dev = ha->pci_dev;
 3228         q80_rq_add_rcv_rings_t  *add_rcv;
 3229         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
 3230         uint32_t                i,j, err;
 3231         qla_hw_t                *hw = &ha->hw;
 3232 
 3233         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
 3234         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
 3235 
 3236         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
 3237         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
 3238         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
 3239 
 3240         add_rcv->nrds_sets_rings = nsds | (1 << 5);
 3241         add_rcv->nsds_rings = nsds;
 3242         add_rcv->cntxt_id = hw->rcv_cntxt_id;
 3243 
 3244         for (i = 0; i <  nsds; i++) {
 3245                 j = i + sds_idx;
 3246 
 3247                 add_rcv->sds[i].paddr =
 3248                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
 3249 
 3250                 add_rcv->sds[i].size =
 3251                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
 3252 
 3253                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
 3254                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
 3255         }
 3256 
 3257         for (i = 0; (i <  nsds); i++) {
 3258                 j = i + sds_idx;
 3259 
 3260                 add_rcv->rds[i].paddr_std =
 3261                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
 3262 
 3263                 if (ha->hw.enable_9kb)
 3264                         add_rcv->rds[i].std_bsize =
 3265                                 qla_host_to_le64(MJUM9BYTES);
 3266                 else
 3267                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
 3268 
 3269                 add_rcv->rds[i].std_nentries =
 3270                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
 3271         }
 3272 
 3273         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
 3274                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
 3275                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
 3276                 device_printf(dev, "%s: failed0\n", __func__);
 3277                 return (-1);
 3278         }
 3279 
 3280         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
 3281 
 3282         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
 3283 
 3284         if (err) {
 3285                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3286                 return (-1);
 3287         }
 3288 
 3289         for (i = 0; i < nsds; i++) {
 3290                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
 3291         }
 3292 
 3293         for (i = 0; i < nsds; i++) {
 3294                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
 3295         }
 3296 
 3297         return (0);
 3298 }
 3299 
 3300 /*
 3301  * Name: qla_del_rcv_cntxt
 3302  * Function: Destroys the Receive Context.
 3303  */
 3304 static void
 3305 qla_del_rcv_cntxt(qla_host_t *ha)
 3306 {
 3307         device_t                        dev = ha->pci_dev;
 3308         q80_rcv_cntxt_destroy_t         *rcntxt;
 3309         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
 3310         uint32_t                        err;
 3311         uint8_t                         bcast_mac[6];
 3312 
 3313         if (!ha->hw.flags.init_rx_cnxt)
 3314                 return;
 3315 
 3316         if (qla_hw_del_all_mcast(ha))
 3317                 return;
 3318 
 3319         if (ha->hw.flags.bcast_mac) {
 3320                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
 3321                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
 3322 
 3323                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
 3324                         return;
 3325                 ha->hw.flags.bcast_mac = 0;
 3326         }
 3327 
 3328         if (ha->hw.flags.unicast_mac) {
 3329                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
 3330                         return;
 3331                 ha->hw.flags.unicast_mac = 0;
 3332         }
 3333 
 3334         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
 3335         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
 3336 
 3337         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
 3338         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
 3339         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
 3340 
 3341         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
 3342 
 3343         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
 3344                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
 3345                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
 3346                 device_printf(dev, "%s: failed0\n", __func__);
 3347                 return;
 3348         }
 3349         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
 3350 
 3351         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
 3352 
 3353         if (err) {
 3354                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3355         }
 3356 
 3357         ha->hw.flags.init_rx_cnxt = 0;
 3358         return;
 3359 }
 3360 
 3361 /*
 3362  * Name: qla_init_xmt_cntxt
 3363  * Function: Creates the Transmit Context.
 3364  */
 3365 static int
 3366 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
 3367 {
 3368         device_t                dev;
 3369         qla_hw_t                *hw = &ha->hw;
 3370         q80_rq_tx_cntxt_t       *tcntxt;
 3371         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
 3372         uint32_t                err;
 3373         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
 3374         uint32_t                intr_idx;
 3375 
 3376         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
 3377 
 3378         dev = ha->pci_dev;
 3379 
 3380         /*
 3381          * Create Transmit Context
 3382          */
 3383         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
 3384         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
 3385 
 3386         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
 3387         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
 3388         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
 3389 
 3390         intr_idx = txr_idx;
 3391 
 3392 #ifdef QL_ENABLE_ISCSI_TLV
 3393 
 3394         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
 3395                                 Q8_TX_CNTXT_CAP0_TC;
 3396 
 3397         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
 3398                 tcntxt->traffic_class = 1;
 3399         }
 3400 
 3401         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
 3402 
 3403 #else
 3404         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
 3405 
 3406 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
 3407 
 3408         tcntxt->ntx_rings = 1;
 3409 
 3410         tcntxt->tx_ring[0].paddr =
 3411                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
 3412         tcntxt->tx_ring[0].tx_consumer =
 3413                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
 3414         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
 3415 
 3416         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
 3417         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
 3418 
 3419         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
 3420         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
 3421         *(hw_tx_cntxt->tx_cons) = 0;
 3422 
 3423         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
 3424                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
 3425                 ha->hw.mbox,
 3426                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
 3427                 device_printf(dev, "%s: failed0\n", __func__);
 3428                 return (-1);
 3429         }
 3430         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
 3431 
 3432         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
 3433 
 3434         if (err) {
 3435                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3436                 return -1;
 3437         }
 3438 
 3439         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
 3440         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
 3441 
 3442         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
 3443                 return (-1);
 3444 
 3445         return (0);
 3446 }
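       /*
        * Illustrative sketch, not driver code: under QL_ENABLE_ISCSI_TLV
        * the transmit rings above are split in half; the upper half is
        * tagged traffic class 1 and each upper-half ring reuses the MSI-X
        * vector of its lower-half peer. With half = num_tx_rings >> 1
        * (tc and half are hypothetical names):
        */
       #if 0
               tc       = (txr_idx >= half) ? 1 : 0;
               intr_idx = txr_idx % half;
       #endif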
 3447 
 3448 /*
 3449  * Name: qla_del_xmt_cntxt
 3450  * Function: Destroys the Transmit Context.
 3451  */
 3452 static int
 3453 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
 3454 {
 3455         device_t                        dev = ha->pci_dev;
 3456         q80_tx_cntxt_destroy_t          *tcntxt;
 3457         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
 3458         uint32_t                        err;
 3459 
 3460         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
 3461         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
 3462 
 3463         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
 3464         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
 3465         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
 3466 
 3467         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
 3468 
 3469         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
 3470                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
 3471                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
 3472                 device_printf(dev, "%s: failed0\n", __func__);
 3473                 return (-1);
 3474         }
 3475         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
 3476 
 3477         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
 3478 
 3479         if (err) {
 3480                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
 3481                 return (-1);
 3482         }
 3483 
 3484         return (0);
 3485 }
 3486 static int
 3487 qla_del_xmt_cntxt(qla_host_t *ha)
 3488 {
 3489         uint32_t i;
 3490         int ret = 0;
 3491 
 3492         if (!ha->hw.flags.init_tx_cnxt)
 3493                 return (ret);
 3494 
 3495         for (i = 0; i < ha->hw.num_tx_rings; i++) {
 3496                 if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
 3497                         break;
 3498         }
 3499         ha->hw.flags.init_tx_cnxt = 0;
 3500 
 3501         return (ret);
 3502 }
 3503 
 3504 static int
 3505 qla_init_xmt_cntxt(qla_host_t *ha)
 3506 {
 3507         uint32_t i, j;
 3508 
 3509         for (i = 0; i < ha->hw.num_tx_rings; i++) {
 3510                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
 3511                         for (j = 0; j < i; j++) {
 3512                                 if (qla_del_xmt_cntxt_i(ha, j))
 3513                                         break;
 3514                         }
 3515                         return (-1);
 3516                 }
 3517         }
 3518         ha->hw.flags.init_tx_cnxt = 1;
 3519         return (0);
 3520 }
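       /*
        * Illustrative sketch, not driver code: qla_init_xmt_cntxt() uses
        * the usual create-or-unwind idiom, so a failure part-way through
        * leaves no half-built transmit contexts behind. With hypothetical
        * create()/destroy():
        */
       #if 0
               for (i = 0; i < n; i++) {
                       if (create(i) != 0) {
                               while (i--)
                                       (void)destroy(i);
                               return (-1);
                       }
               }
       #endif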
 3521 
 3522 static int
 3523 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
 3524 {
 3525         int i, nmcast;
 3526         uint32_t count = 0;
 3527         uint8_t *mcast;
 3528 
 3529         nmcast = ha->hw.nmcast;
 3530 
 3531         QL_DPRINT2(ha, (ha->pci_dev,
 3532                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
 3533 
 3534         mcast = ha->hw.mac_addr_arr;
 3535         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 3536 
 3537         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
 3538                 if ((ha->hw.mcast[i].addr[0] != 0) || 
 3539                         (ha->hw.mcast[i].addr[1] != 0) ||
 3540                         (ha->hw.mcast[i].addr[2] != 0) ||
 3541                         (ha->hw.mcast[i].addr[3] != 0) ||
 3542                         (ha->hw.mcast[i].addr[4] != 0) ||
 3543                         (ha->hw.mcast[i].addr[5] != 0)) {
 3544                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
 3545                         mcast = mcast + ETHER_ADDR_LEN;
 3546                         count++;
 3547 
 3548                         device_printf(ha->pci_dev,
 3549                                 "%s: %x:%x:%x:%x:%x:%x \n",
 3550                                 __func__, ha->hw.mcast[i].addr[0],
 3551                                 ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
 3552                                 ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
 3553                                 ha->hw.mcast[i].addr[5]);
 3554                         
 3555                         if (count == Q8_MAX_MAC_ADDRS) {
 3556                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
 3557                                         add_mcast, count)) {
 3558                                         device_printf(ha->pci_dev,
 3559                                                 "%s: failed\n", __func__);
 3560                                         return (-1);
 3561                                 }
 3562 
 3563                                 count = 0;
 3564                                 mcast = ha->hw.mac_addr_arr;
 3565                                 memset(mcast, 0,
 3566                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 3567                         }
 3568 
 3569                         nmcast--;
 3570                 }
 3571         }
 3572 
 3573         if (count) {
 3574                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
 3575                         count)) {
 3576                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
 3577                         return (-1);
 3578                 }
 3579         }
 3580         QL_DPRINT2(ha, (ha->pci_dev,
 3581                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
 3582 
 3583         return 0;
 3584 }
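       /*
        * Illustrative sketch, not driver code: qla_hw_all_mcast() batches
        * addresses through mac_addr_arr, flushing to firmware whenever
        * Q8_MAX_MAC_ADDRS are pending plus once more for the remainder.
        * Stripped to its core (addr[] and total are hypothetical):
        */
       #if 0
               count = 0;
               dst = ha->hw.mac_addr_arr;
               for (i = 0; i < total; i++) {
                       bcopy(addr[i], dst, ETHER_ADDR_LEN);
                       dst += ETHER_ADDR_LEN;
                       if (++count == Q8_MAX_MAC_ADDRS) {
                               if (qla_config_mac_addr(ha,
                                       ha->hw.mac_addr_arr, add_mcast, count))
                                       return (-1);
                               count = 0;
                               dst = ha->hw.mac_addr_arr;
                       }
               }
               if (count && qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
                       add_mcast, count))
                       return (-1);
       #endif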
 3585 
 3586 static int
 3587 qla_hw_add_all_mcast(qla_host_t *ha)
 3588 {
 3589         int ret;
 3590 
 3591         ret = qla_hw_all_mcast(ha, 1);
 3592 
 3593         return (ret);
 3594 }
 3595 
 3596 int
 3597 qla_hw_del_all_mcast(qla_host_t *ha)
 3598 {
 3599         int ret;
 3600 
 3601         ret = qla_hw_all_mcast(ha, 0);
 3602 
 3603         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
 3604         ha->hw.nmcast = 0;
 3605 
 3606         return (ret);
 3607 }
 3608 
 3609 static int
 3610 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
 3611 {
 3612         int i;
 3613 
 3614         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 3615                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
  3616                         return (0); /* it has already been added */
 3617         }
 3618         return (-1);
 3619 }
 3620 
 3621 static int
 3622 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
 3623 {
 3624         int i;
 3625 
 3626         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 3627                 if ((ha->hw.mcast[i].addr[0] == 0) && 
 3628                         (ha->hw.mcast[i].addr[1] == 0) &&
 3629                         (ha->hw.mcast[i].addr[2] == 0) &&
 3630                         (ha->hw.mcast[i].addr[3] == 0) &&
 3631                         (ha->hw.mcast[i].addr[4] == 0) &&
 3632                         (ha->hw.mcast[i].addr[5] == 0)) {
 3633                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
 3634                         ha->hw.nmcast++;        
 3635 
 3636                         mta = mta + ETHER_ADDR_LEN;
 3637                         nmcast--;
 3638 
 3639                         if (nmcast == 0)
 3640                                 break;
 3641                 }
 3642         }
 3643         return 0;
 3644 }
 3645 
 3646 static int
 3647 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
 3648 {
 3649         int i;
 3650 
 3651         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
 3652                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
 3653                         ha->hw.mcast[i].addr[0] = 0;
 3654                         ha->hw.mcast[i].addr[1] = 0;
 3655                         ha->hw.mcast[i].addr[2] = 0;
 3656                         ha->hw.mcast[i].addr[3] = 0;
 3657                         ha->hw.mcast[i].addr[4] = 0;
 3658                         ha->hw.mcast[i].addr[5] = 0;
 3659 
 3660                         ha->hw.nmcast--;        
 3661 
 3662                         mta = mta + ETHER_ADDR_LEN;
 3663                         nmcast--;
 3664 
 3665                         if (nmcast == 0)
 3666                                 break;
 3667                 }
 3668         }
 3669         return 0;
 3670 }
 3671 
 3672 /*
 3673  * Name: ql_hw_set_multi
  3674  * Function: Sets the Multicast Addresses provided by the host O.S. into the
 3675  *      hardware (for the given interface)
 3676  */
 3677 int
 3678 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
 3679         uint32_t add_mac)
 3680 {
 3681         uint8_t *mta = mcast_addr;
 3682         int i;
 3683         int ret = 0;
 3684         uint32_t count = 0;
 3685         uint8_t *mcast;
 3686 
 3687         mcast = ha->hw.mac_addr_arr;
 3688         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 3689 
 3690         for (i = 0; i < mcnt; i++) {
 3691                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
 3692                         if (add_mac) {
 3693                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
 3694                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
 3695                                         mcast = mcast + ETHER_ADDR_LEN;
 3696                                         count++;
 3697                                 }
 3698                         } else {
 3699                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
 3700                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
 3701                                         mcast = mcast + ETHER_ADDR_LEN;
 3702                                         count++;
 3703                                 }
 3704                         }
 3705                 }
 3706                 if (count == Q8_MAX_MAC_ADDRS) {
 3707                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
 3708                                 add_mac, count)) {
 3709                                 device_printf(ha->pci_dev, "%s: failed\n",
 3710                                         __func__);
 3711                                 return (-1);
 3712                         }
 3713 
 3714                         if (add_mac) {
 3715                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
 3716                                         count);
 3717                         } else {
 3718                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
 3719                                         count);
 3720                         }
 3721 
 3722                         count = 0;
 3723                         mcast = ha->hw.mac_addr_arr;
 3724                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
 3725                 }
 3726                         
 3727                 mta += Q8_MAC_ADDR_LEN;
 3728         }
 3729 
 3730         if (count) {
 3731                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
 3732                         count)) {
 3733                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
 3734                         return (-1);
 3735                 }
 3736                 if (add_mac) {
 3737                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
 3738                 } else {
 3739                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
 3740                 }
 3741         }
 3742 
 3743         return (ret);
 3744 }
 3745 
 3746 /*
 3747  * Name: ql_hw_tx_done_locked
 3748  * Function: Handle Transmit Completions
 3749  */
 3750 void
 3751 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
 3752 {
 3753         qla_tx_buf_t *txb;
 3754         qla_hw_t *hw = &ha->hw;
 3755         uint32_t comp_idx, comp_count = 0;
 3756         qla_hw_tx_cntxt_t *hw_tx_cntxt;
 3757 
 3758         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
 3759 
  3760         /* retrieve the index of the last completed entry in the tx ring */
 3761         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
 3762 
 3763         while (comp_idx != hw_tx_cntxt->txr_comp) {
 3764                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
 3765 
 3766                 hw_tx_cntxt->txr_comp++;
 3767                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
 3768                         hw_tx_cntxt->txr_comp = 0;
 3769 
 3770                 comp_count++;
 3771 
 3772                 if (txb->m_head) {
 3773                         if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
 3774 
 3775                         bus_dmamap_sync(ha->tx_tag, txb->map,
 3776                                 BUS_DMASYNC_POSTWRITE);
 3777                         bus_dmamap_unload(ha->tx_tag, txb->map);
 3778                         m_freem(txb->m_head);
 3779 
 3780                         txb->m_head = NULL;
 3781                 }
 3782         }
 3783 
 3784         hw_tx_cntxt->txr_free += comp_count;
 3785 
 3786         if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
  3787                 device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d "
  3788                         "txr_next = %d txr_comp = %d\n", __func__, __LINE__,
 3789                         txr_idx, hw_tx_cntxt->txr_free,
 3790                         hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
 3791 
 3792         QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
 3793                 ("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
 3794                 __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
 3795                 hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
 3796 
 3797         return;
 3798 }
 3799 
 3800 void
 3801 ql_update_link_state(qla_host_t *ha)
 3802 {
 3803         uint32_t link_state = 0;
 3804         uint32_t prev_link_state;
 3805 
 3806         prev_link_state =  ha->hw.link_up;
 3807 
 3808         if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
 3809                 link_state = READ_REG32(ha, Q8_LINK_STATE);
 3810 
 3811                 if (ha->pci_func == 0) {
 3812                         link_state = (((link_state & 0xF) == 1)? 1 : 0);
 3813                 } else {
 3814                         link_state = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
 3815                 }
 3816         }
 3817 
 3818         atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
 3819 
 3820         if (prev_link_state !=  ha->hw.link_up) {
 3821                 if (ha->hw.link_up) {
 3822                         if_link_state_change(ha->ifp, LINK_STATE_UP);
 3823                 } else {
 3824                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
 3825                 }
 3826         }
 3827         return;
 3828 }
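       /*
        * Illustrative sketch, not driver code: Q8_LINK_STATE appears to
        * pack a 4-bit link state per PCI function, with 1 meaning link-up;
        * the two branches above are the f = 0 and f = 1 cases of the
        * general form below (f and link_up are hypothetical names):
        */
       #if 0
               link_up =
                   (((READ_REG32(ha, Q8_LINK_STATE) >> (4 * f)) & 0xF) == 1);
       #endif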
 3829 
 3830 int
 3831 ql_hw_check_health(qla_host_t *ha)
 3832 {
 3833         uint32_t val;
 3834 
 3835         ha->hw.health_count++;
 3836 
 3837         if (ha->hw.health_count < 500)
 3838                 return 0;
 3839 
 3840         ha->hw.health_count = 0;
 3841 
 3842         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
 3843 
 3844         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
 3845                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
 3846                 device_printf(ha->pci_dev, "%s: Temperature Alert"
 3847                         " at ts_usecs %ld ts_reg = 0x%08x\n",
 3848                         __func__, qla_get_usec_timestamp(), val);
 3849 
 3850                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
 3851                         ha->hw.sp_log_stop = -1;
 3852 
 3853                 QL_INITIATE_RECOVERY(ha);
 3854                 return -1;
 3855         }
 3856 
 3857         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
 3858 
 3859         if ((val != ha->hw.hbeat_value) &&
 3860                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
 3861                 ha->hw.hbeat_value = val;
 3862                 ha->hw.hbeat_failure = 0;
 3863                 return 0;
 3864         }
 3865 
 3866         ha->hw.hbeat_failure++;
 3867 
 3868         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
  3869                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
 3870                         __func__, val);
 3871         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
 3872                 return 0;
 3873         else {
 3874                 uint32_t peg_halt_status1;
 3875                 uint32_t peg_halt_status2;
 3876 
 3877                 peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
 3878                 peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
 3879 
 3880                 device_printf(ha->pci_dev,
  3881                         "%s: Heartbeat Failure at ts_usecs = %ld "
 3882                         "fw_heart_beat = 0x%08x "
 3883                         "peg_halt_status1 = 0x%08x "
 3884                         "peg_halt_status2 = 0x%08x\n",
 3885                         __func__, qla_get_usec_timestamp(), val,
 3886                         peg_halt_status1, peg_halt_status2);
 3887 
 3888                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
 3889                         ha->hw.sp_log_stop = -1;
 3890         }
 3891         QL_INITIATE_RECOVERY(ha);
 3892 
 3893         return -1;
 3894 }
 3895 
 3896 static int
 3897 qla_init_nic_func(qla_host_t *ha)
 3898 {
 3899         device_t                dev;
 3900         q80_init_nic_func_t     *init_nic;
 3901         q80_init_nic_func_rsp_t *init_nic_rsp;
 3902         uint32_t                err;
 3903 
 3904         dev = ha->pci_dev;
 3905 
 3906         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
 3907         bzero(init_nic, sizeof(q80_init_nic_func_t));
 3908 
 3909         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
 3910         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
 3911         init_nic->count_version |= Q8_MBX_CMD_VERSION;
 3912 
 3913         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
 3914         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
 3915         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
 3916 
 3917 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
 3918         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
 3919                 (sizeof (q80_init_nic_func_t) >> 2),
 3920                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
 3921                 device_printf(dev, "%s: failed\n", __func__);
 3922                 return -1;
 3923         }
 3924 
 3925         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
 3926 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
 3927 
 3928         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
 3929 
 3930         if (err) {
 3931                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 3932         } else {
 3933                 device_printf(dev, "%s: successful\n", __func__);
 3934         }
 3935 
 3936         return 0;
 3937 }
 3938 
 3939 static int
 3940 qla_stop_nic_func(qla_host_t *ha)
 3941 {
 3942         device_t                dev;
 3943         q80_stop_nic_func_t     *stop_nic;
 3944         q80_stop_nic_func_rsp_t *stop_nic_rsp;
 3945         uint32_t                err;
 3946 
 3947         dev = ha->pci_dev;
 3948 
 3949         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
 3950         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
 3951 
 3952         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
 3953         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
 3954         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
 3955 
 3956         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
 3957         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
 3958 
 3959 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
 3960         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
 3961                 (sizeof (q80_stop_nic_func_t) >> 2),
 3962                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
 3963                 device_printf(dev, "%s: failed\n", __func__);
 3964                 return -1;
 3965         }
 3966 
 3967         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
  3968 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
 3969 
 3970         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
 3971 
 3972         if (err) {
 3973                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 3974         }
 3975 
 3976         return 0;
 3977 }
 3978 
 3979 static int
 3980 qla_query_fw_dcbx_caps(qla_host_t *ha)
 3981 {
 3982         device_t                        dev;
 3983         q80_query_fw_dcbx_caps_t        *fw_dcbx;
 3984         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
 3985         uint32_t                        err;
 3986 
 3987         dev = ha->pci_dev;
 3988 
 3989         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
 3990         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
 3991 
 3992         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
 3993         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
 3994         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
 3995 
 3996         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
 3997         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
 3998                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
 3999                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
 4000                 device_printf(dev, "%s: failed\n", __func__);
 4001                 return -1;
 4002         }
 4003 
 4004         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
 4005         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
 4006                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
 4007 
 4008         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
 4009 
 4010         if (err) {
 4011                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4012         }
 4013 
 4014         return 0;
 4015 }
 4016 
 4017 static int
 4018 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
 4019         uint32_t aen_mb3, uint32_t aen_mb4)
 4020 {
 4021         device_t                dev;
 4022         q80_idc_ack_t           *idc_ack;
 4023         q80_idc_ack_rsp_t       *idc_ack_rsp;
 4024         uint32_t                err;
 4025         int                     count = 300;
 4026 
 4027         dev = ha->pci_dev;
 4028 
 4029         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
 4030         bzero(idc_ack, sizeof(q80_idc_ack_t));
 4031 
 4032         idc_ack->opcode = Q8_MBX_IDC_ACK;
 4033         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
 4034         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
 4035 
 4036         idc_ack->aen_mb1 = aen_mb1;
 4037         idc_ack->aen_mb2 = aen_mb2;
 4038         idc_ack->aen_mb3 = aen_mb3;
 4039         idc_ack->aen_mb4 = aen_mb4;
 4040 
  4041         ha->hw.imd_compl = 0;
 4042 
 4043         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
 4044                 (sizeof (q80_idc_ack_t) >> 2),
 4045                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
 4046                 device_printf(dev, "%s: failed\n", __func__);
 4047                 return -1;
 4048         }
 4049 
 4050         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
 4051 
 4052         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
 4053 
 4054         if (err) {
 4055                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4056                 return(-1);
 4057         }
 4058 
 4059         while (count && !ha->hw.imd_compl) {
 4060                 qla_mdelay(__func__, 100);
 4061                 count--;
 4062         }
 4063 
 4064         if (!count)
 4065                 return -1;
 4066         else
 4067                 device_printf(dev, "%s: count %d\n", __func__, count);
 4068 
 4069         return (0);
 4070 }
 4071 
 4072 static int
 4073 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
 4074 {
 4075         device_t                dev;
 4076         q80_set_port_cfg_t      *pcfg;
 4077         q80_set_port_cfg_rsp_t  *pfg_rsp;
 4078         uint32_t                err;
 4079         int                     count = 300;
 4080 
 4081         dev = ha->pci_dev;
 4082 
 4083         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
 4084         bzero(pcfg, sizeof(q80_set_port_cfg_t));
 4085 
 4086         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
 4087         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
 4088         pcfg->count_version |= Q8_MBX_CMD_VERSION;
 4089 
 4090         pcfg->cfg_bits = cfg_bits;
 4091 
 4092         device_printf(dev, "%s: cfg_bits"
 4093                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
 4094                 " [0x%x, 0x%x, 0x%x]\n", __func__,
 4095                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
 4096                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
 4097                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
 4098 
  4099         ha->hw.imd_compl = 0;
 4100 
 4101         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
 4102                 (sizeof (q80_set_port_cfg_t) >> 2),
 4103                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
 4104                 device_printf(dev, "%s: failed\n", __func__);
 4105                 return -1;
 4106         }
 4107 
 4108         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
 4109 
 4110         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
 4111 
 4112         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
 4113                 while (count && !ha->hw.imd_compl) {
 4114                         qla_mdelay(__func__, 100);
 4115                         count--;
 4116                 }
 4117                 if (count) {
 4118                         device_printf(dev, "%s: count %d\n", __func__, count);
 4119 
 4120                         err = 0;
 4121                 }
 4122         }
 4123 
 4124         if (err) {
 4125                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4126                 return(-1);
 4127         }
 4128 
 4129         return (0);
 4130 }
 4131 
 4132 static int
 4133 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
 4134 {
 4135         uint32_t                        err;
 4136         device_t                        dev = ha->pci_dev;
 4137         q80_config_md_templ_size_t      *md_size;
 4138         q80_config_md_templ_size_rsp_t  *md_size_rsp;
 4139 
 4140 #ifndef QL_LDFLASH_FW
 4141 
 4142         ql_minidump_template_hdr_t *hdr;
 4143 
 4144         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
 4145         *size = hdr->size_of_template;
 4146         return (0);
 4147 
  4148 #endif /* #ifndef QL_LDFLASH_FW */
 4149 
 4150         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
 4151         bzero(md_size, sizeof(q80_config_md_templ_size_t));
 4152 
 4153         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
 4154         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
 4155         md_size->count_version |= Q8_MBX_CMD_VERSION;
 4156 
 4157         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
 4158                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
 4159                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
 4160                 device_printf(dev, "%s: failed\n", __func__);
 4161 
 4162                 return (-1);
 4163         }
 4164 
 4165         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
 4166 
 4167         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
 4168 
 4169         if (err) {
 4170                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4171                 return(-1);
 4172         }
 4173 
 4174         *size = md_size_rsp->templ_size;
 4175 
 4176         return (0);
 4177 }
 4178 
 4179 static int
 4180 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
 4181 {
 4182         device_t                dev;
 4183         q80_get_port_cfg_t      *pcfg;
 4184         q80_get_port_cfg_rsp_t  *pcfg_rsp;
 4185         uint32_t                err;
 4186 
 4187         dev = ha->pci_dev;
 4188 
 4189         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
 4190         bzero(pcfg, sizeof(q80_get_port_cfg_t));
 4191 
 4192         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
 4193         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
 4194         pcfg->count_version |= Q8_MBX_CMD_VERSION;
 4195 
 4196         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
 4197                 (sizeof (q80_get_port_cfg_t) >> 2),
 4198                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
 4199                 device_printf(dev, "%s: failed\n", __func__);
 4200                 return -1;
 4201         }
 4202 
 4203         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
 4204 
 4205         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
 4206 
 4207         if (err) {
 4208                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4209                 return(-1);
 4210         }
 4211 
 4212         device_printf(dev, "%s: [cfg_bits, port type]"
 4213                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
 4214                 " [0x%x, 0x%x, 0x%x]\n", __func__,
 4215                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
 4216                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
 4217                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
 4218                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
 4219                 );
 4220 
 4221         *cfg_bits = pcfg_rsp->cfg_bits;
 4222 
 4223         return (0);
 4224 }
 4225 
 4226 int
 4227 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
 4228 {
 4229         struct ether_vlan_header        *eh;
 4230         uint16_t                        etype;
 4231         struct ip                       *ip = NULL;
 4232         struct ip6_hdr                  *ip6 = NULL;
 4233         struct tcphdr                   *th = NULL;
 4234         uint32_t                        hdrlen;
 4235         uint32_t                        offset;
 4236         uint8_t                         buf[sizeof(struct ip6_hdr)];
 4237 
 4238         eh = mtod(mp, struct ether_vlan_header *);
 4239 
 4240         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 4241                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 4242                 etype = ntohs(eh->evl_proto);
 4243         } else {
 4244                 hdrlen = ETHER_HDR_LEN;
 4245                 etype = ntohs(eh->evl_encap_proto);
 4246         }
 4247 
 4248         if (etype == ETHERTYPE_IP) {
 4249                 offset = (hdrlen + sizeof (struct ip));
 4250 
 4251                 if (mp->m_len >= offset) {
 4252                         ip = (struct ip *)(mp->m_data + hdrlen);
 4253                 } else {
 4254                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
 4255                         ip = (struct ip *)buf;
 4256                 }
 4257 
 4258                 if (ip->ip_p == IPPROTO_TCP) {
 4259                         hdrlen += ip->ip_hl << 2;
 4260                         offset = hdrlen + 4;
 4261 
 4262                         if (mp->m_len >= offset) {
 4263                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
 4264                         } else {
 4265                                 m_copydata(mp, hdrlen, 4, buf);
 4266                                 th = (struct tcphdr *)buf;
 4267                         }
 4268                 }
 4269 
 4270         } else if (etype == ETHERTYPE_IPV6) {
 4271                 offset = (hdrlen + sizeof (struct ip6_hdr));
 4272 
 4273                 if (mp->m_len >= offset) {
 4274                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
 4275                 } else {
 4276                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
 4277                         ip6 = (struct ip6_hdr *)buf;
 4278                 }
 4279 
 4280                 if (ip6->ip6_nxt == IPPROTO_TCP) {
 4281                         hdrlen += sizeof(struct ip6_hdr);
 4282                         offset = hdrlen + 4;
 4283 
 4284                         if (mp->m_len >= offset) {
 4285                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
 4286                         } else {
 4287                                 m_copydata(mp, hdrlen, 4, buf);
 4288                                 th = (struct tcphdr *)buf;
 4289                         }
 4290                 }
 4291         }
 4292 
 4293         if (th != NULL) {
 4294                 if ((th->th_sport == htons(3260)) ||
 4295                         (th->th_dport == htons(3260)))
 4296                         return (0);
 4297         }
 4298         return (-1);
 4299 }
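
/*
 * Editor's note: ql_iscsi_pdu() uses an inverted convention -- it returns
 * 0 when the frame carries TCP traffic to or from port 3260, the
 * IANA-registered iSCSI target port, and -1 otherwise.  A hypothetical
 * caller in the transmit path might use it like this (sketch only):
 */
#if 0
        int iscsi_pdu = 0;

        if (ql_iscsi_pdu(ha, mp) == 0)
                iscsi_pdu = 1;          /* steer iSCSI traffic specially */
#endif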
 4300 
 4301 void
 4302 qla_hw_async_event(qla_host_t *ha)
 4303 {
 4304         switch (ha->hw.aen_mb0) {
 4305         case 0x8101:
 4306                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
 4307                         ha->hw.aen_mb3, ha->hw.aen_mb4);
 4308 
 4309                 break;
 4310 
 4311         default:
 4312                 break;
 4313         }
 4314 
 4315         return;
 4316 }
 4317 
 4318 #ifdef QL_LDFLASH_FW
 4319 static int
 4320 ql_get_minidump_template(qla_host_t *ha)
 4321 {
 4322         uint32_t                        err;
 4323         device_t                        dev = ha->pci_dev;
 4324         q80_config_md_templ_cmd_t       *md_templ;
 4325         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
 4326 
 4327         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
 4328         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
 4329 
 4330         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
 4331         md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
 4332         md_templ->count_version |= Q8_MBX_CMD_VERSION;
 4333 
 4334         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
 4335         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
 4336 
 4337         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
 4338                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
 4339                  ha->hw.mbox,
 4340                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
 4341                 device_printf(dev, "%s: failed\n", __func__);
 4342 
 4343                 return (-1);
 4344         }
 4345 
 4346         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
 4347 
 4348         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
 4349 
 4350         if (err) {
 4351                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
 4352                 return (-1);
 4353         }
 4354 
 4355         return (0);
 4356 
 4357 }
 4358 #endif /* #ifdef QL_LDFLASH_FW */
 4359 
 4360 /*
 4361  * Minidump related functionality 
 4362  */
 4363 
 4364 static int ql_parse_template(qla_host_t *ha);
 4365 
 4366 static uint32_t ql_rdcrb(qla_host_t *ha,
 4367                         ql_minidump_entry_rdcrb_t *crb_entry,
 4368                         uint32_t *data_buff);
 4369 
 4370 static uint32_t ql_pollrd(qla_host_t *ha,
 4371                         ql_minidump_entry_pollrd_t *entry,
 4372                         uint32_t *data_buff);
 4373 
 4374 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
 4375                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
 4376                         uint32_t *data_buff);
 4377 
 4378 static uint32_t ql_L2Cache(qla_host_t *ha,
 4379                         ql_minidump_entry_cache_t *cacheEntry,
 4380                         uint32_t *data_buff);
 4381 
 4382 static uint32_t ql_L1Cache(qla_host_t *ha,
 4383                         ql_minidump_entry_cache_t *cacheEntry,
 4384                         uint32_t *data_buff);
 4385 
 4386 static uint32_t ql_rdocm(qla_host_t *ha,
 4387                         ql_minidump_entry_rdocm_t *ocmEntry,
 4388                         uint32_t *data_buff);
 4389 
 4390 static uint32_t ql_rdmem(qla_host_t *ha,
 4391                         ql_minidump_entry_rdmem_t *mem_entry,
 4392                         uint32_t *data_buff);
 4393 
 4394 static uint32_t ql_rdrom(qla_host_t *ha,
 4395                         ql_minidump_entry_rdrom_t *romEntry,
 4396                         uint32_t *data_buff);
 4397 
 4398 static uint32_t ql_rdmux(qla_host_t *ha,
 4399                         ql_minidump_entry_mux_t *muxEntry,
 4400                         uint32_t *data_buff);
 4401 
 4402 static uint32_t ql_rdmux2(qla_host_t *ha,
 4403                         ql_minidump_entry_mux2_t *muxEntry,
 4404                         uint32_t *data_buff);
 4405 
 4406 static uint32_t ql_rdqueue(qla_host_t *ha,
 4407                         ql_minidump_entry_queue_t *queueEntry,
 4408                         uint32_t *data_buff);
 4409 
 4410 static uint32_t ql_cntrl(qla_host_t *ha,
 4411                         ql_minidump_template_hdr_t *template_hdr,
 4412                         ql_minidump_entry_cntrl_t *crbEntry);
 4413 
 4414 static uint32_t
 4415 ql_minidump_size(qla_host_t *ha)
 4416 {
 4417         uint32_t i, k;
 4418         uint32_t size = 0;
 4419         ql_minidump_template_hdr_t *hdr;
 4420 
 4421         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
 4422 
 4423         i = 0x2;
 4424 
 4425         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
 4426                 if (i & ha->hw.mdump_capture_mask)
 4427                         size += hdr->capture_size_array[k];
 4428                 i = i << 1;
 4429         }
 4430         return (size);
 4431 }
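
/*
 * Editor's sketch (illustrative, excluded from compilation): the loop in
 * ql_minidump_size() walks the capture mask starting at bit 1 (i = 0x2;
 * bit 0 is never tested), adding capture_size_array[k] for every set
 * bit k.  Standalone version, assuming the array has array_len entries:
 */
#if 0
static uint32_t
mdump_size_for_mask(const uint32_t *capture_size_array, uint32_t array_len,
        uint32_t capture_mask)
{
        uint32_t i = 0x2;               /* start the walk at bit 1 */
        uint32_t k, size = 0;

        for (k = 1; k < array_len; k++) {
                if (i & capture_mask)
                        size += capture_size_array[k];
                i = i << 1;
        }
        return (size);  /* e.g. mask 0x6 sums entries 1 and 2 only */
}
#endif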
 4432 
 4433 static void
 4434 ql_free_minidump_buffer(qla_host_t *ha)
 4435 {
 4436         if (ha->hw.mdump_buffer != NULL) {
 4437                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
 4438                 ha->hw.mdump_buffer = NULL;
 4439                 ha->hw.mdump_buffer_size = 0;
 4440         }
 4441         return;
 4442 }
 4443 
 4444 static int
 4445 ql_alloc_minidump_buffer(qla_host_t *ha)
 4446 {
 4447         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
 4448 
 4449         if (!ha->hw.mdump_buffer_size)
 4450                 return (-1);
 4451 
 4452         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
 4453                                         M_NOWAIT);
 4454 
 4455         if (ha->hw.mdump_buffer == NULL)
 4456                 return (-1);
 4457 
 4458         return (0);
 4459 }
 4460 
 4461 static void
 4462 ql_free_minidump_template_buffer(qla_host_t *ha)
 4463 {
 4464         if (ha->hw.mdump_template != NULL) {
 4465                 free(ha->hw.mdump_template, M_QLA83XXBUF);
 4466                 ha->hw.mdump_template = NULL;
 4467                 ha->hw.mdump_template_size = 0;
 4468         }
 4469         return;
 4470 }
 4471 
 4472 static int
 4473 ql_alloc_minidump_template_buffer(qla_host_t *ha)
 4474 {
 4475         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
 4476 
 4477         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
 4478                                         M_QLA83XXBUF, M_NOWAIT);
 4479 
 4480         if (ha->hw.mdump_template == NULL)
 4481                 return (-1);
 4482 
 4483         return (0);
 4484 }
 4485 
 4486 static int
 4487 ql_alloc_minidump_buffers(qla_host_t *ha)
 4488 {
 4489         int ret;
 4490 
 4491         ret = ql_alloc_minidump_template_buffer(ha);
 4492 
 4493         if (ret)
 4494                 return (ret);
 4495 
 4496         ret = ql_alloc_minidump_buffer(ha);
 4497 
 4498         if (ret)
 4499                 ql_free_minidump_template_buffer(ha);
 4500 
 4501         return (ret);
 4502 }
 4503 
 4504 static uint32_t
 4505 ql_validate_minidump_checksum(qla_host_t *ha)
 4506 {
 4507         uint64_t sum = 0;
 4508         int count;
 4509         uint32_t *template_buff;
 4510 
 4511         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
 4512         template_buff = ha->hw.dma_buf.minidump.dma_b;
 4513 
 4514         while (count-- > 0) {
 4515                 sum += *template_buff++;
 4516         }
 4517 
 4518         while (sum >> 32) {
 4519                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
 4520         }
 4521 
 4522         return (~sum);
 4523 }
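
/*
 * Editor's note: ql_validate_minidump_checksum() computes a 32-bit
 * one's-complement sum (64-bit accumulate, then fold the carries back in)
 * and returns its complement, so a template validates exactly when the
 * folded sum is 0xFFFFFFFF and the return value is 0.  A worked example
 * with hypothetical data (excluded from compilation):
 */
#if 0
static int
checksum_example(void)
{
        /* 0x80000000 + 0x7FFFFFFE + 0x00000001 == 0xFFFFFFFF exactly */
        uint32_t words[3] = { 0x80000000, 0x7FFFFFFE, 0x00000001 };
        uint64_t sum = 0;
        int i;

        for (i = 0; i < 3; i++)
                sum += words[i];

        while (sum >> 32)
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);

        return ((uint32_t)~sum == 0);   /* 1: the template would validate */
}
#endif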
 4524 
 4525 int
 4526 ql_minidump_init(qla_host_t *ha)
 4527 {
 4528         int             ret = 0;
 4529         uint32_t        template_size = 0;
 4530         device_t        dev = ha->pci_dev;
 4531 
 4532         /*
 4533          * Get Minidump Template Size
 4534          */
 4535         ret = qla_get_minidump_tmplt_size(ha, &template_size);
 4536 
 4537         if (ret || (template_size == 0)) {
 4538                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
 4539                         template_size);
 4540                 return (-1);
 4541         }
 4542 
 4543         /*
 4544          * Allocate Memory for Minidump Template
 4545          */
 4546 
 4547         ha->hw.dma_buf.minidump.alignment = 8;
 4548         ha->hw.dma_buf.minidump.size = template_size;
 4549 
 4550 #ifdef QL_LDFLASH_FW
 4551         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
 4552                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
 4553 
 4554                 return (-1);
 4555         }
 4556         ha->hw.dma_buf.flags.minidump = 1;
 4557 
 4558         /*
 4559          * Retrieve Minidump Template
 4560          */
 4561         ret = ql_get_minidump_template(ha);
 4562 #else
 4563         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
 4564 
 4565 #endif /* #ifdef QL_LDFLASH_FW */
 4566 
 4567         if (ret == 0) {
 4568                 ret = ql_validate_minidump_checksum(ha);
 4569 
 4570                 if (ret == 0) {
 4571                         ret = ql_alloc_minidump_buffers(ha);
 4572 
 4573                         if (ret == 0)
 4574                                 ha->hw.mdump_init = 1;
 4575                         else
 4576                                 device_printf(dev,
 4577                                         "%s: ql_alloc_minidump_buffers"
 4578                                         " failed\n", __func__);
 4579                 } else {
 4580                         device_printf(dev, "%s: ql_validate_minidump_checksum"
 4581                                 " failed\n", __func__);
 4582                 }
 4583         } else {
 4584                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
 4585                          __func__);
 4586         }
 4587 
 4588         if (ret)
 4589                 ql_minidump_free(ha);
 4590 
 4591         return (ret);
 4592 }
 4593 
 4594 static void
 4595 ql_minidump_free(qla_host_t *ha)
 4596 {
 4597         ha->hw.mdump_init = 0;
 4598         if (ha->hw.dma_buf.flags.minidump) {
 4599                 ha->hw.dma_buf.flags.minidump = 0;
 4600                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
 4601         }
 4602 
 4603         ql_free_minidump_template_buffer(ha);
 4604         ql_free_minidump_buffer(ha);
 4605 
 4606         return;
 4607 }
 4608 
 4609 void
 4610 ql_minidump(qla_host_t *ha)
 4611 {
 4612         if (!ha->hw.mdump_init)
 4613                 return;
 4614 
 4615         if (ha->hw.mdump_done)
 4616                 return;
 4617         ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
 4618         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
 4619 
 4620         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
 4621         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
 4622 
 4623         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
 4624                 ha->hw.mdump_template_size);
 4625 
 4626         ql_parse_template(ha);
 4627 
 4628         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
 4629 
 4630         ha->hw.mdump_done = 1;
 4631 
 4632         return;
 4633 }
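
/*
 * Editor's note: ql_minidump() is a no-op unless ql_minidump_init()
 * succeeded (mdump_init set) and no capture has been taken yet
 * (mdump_done clear), so a hypothetical error path can call it
 * unconditionally (sketch only):
 */
#if 0
        /* e.g. on a detected firmware heartbeat failure: */
        ql_minidump(ha);        /* capture lands in ha->hw.mdump_buffer */
#endif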
 4634 
 4635 /*
 4636  * helper routines
 4637  */
 4638 static void 
 4639 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
 4640 {
 4641         if (esize != entry->hdr.entry_capture_size) {
 4642                 entry->hdr.entry_capture_size = esize;
 4643                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
 4644         }
 4645         return;
 4646 }
 4647 
 4648 static int 
 4649 ql_parse_template(qla_host_t *ha)
 4650 {
 4651         uint32_t num_of_entries, buff_level, e_cnt, esize;
 4652         uint32_t rv = 0;
 4653         char *dump_buff, *dbuff;
 4654         int sane_start = 0, sane_end = 0;
 4655         ql_minidump_template_hdr_t *template_hdr;
 4656         ql_minidump_entry_t *entry;
 4657         uint32_t capture_mask; 
 4658         uint32_t dump_size; 
 4659 
 4660         /* Setup parameters */
 4661         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
 4662 
 4663         if (template_hdr->entry_type == TLHDR)
 4664                 sane_start = 1;
 4665 
 4666         dump_buff = (char *) ha->hw.mdump_buffer;
 4667 
 4668         num_of_entries = template_hdr->num_of_entries;
 4669 
 4670         entry = (ql_minidump_entry_t *)((char *)template_hdr
 4671                         + template_hdr->first_entry_offset);
 4672 
 4673         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
 4674                 template_hdr->ocm_window_array[ha->pci_func];
 4675         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
 4676 
 4677         capture_mask = ha->hw.mdump_capture_mask;
 4678         dump_size = ha->hw.mdump_buffer_size;
 4679 
 4680         template_hdr->driver_capture_mask = capture_mask;
 4681 
 4682         QL_DPRINT80(ha, (ha->pci_dev,
 4683                 "%s: sane_start = %d num_of_entries = %d "
 4684                 "capture_mask = 0x%x dump_size = %d \n", 
 4685                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
 4686 
 4687         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
 4688                 /*
 4689                  * If the entry's capture mask does not intersect the
 4690                  * requested capture mask, mark the entry as skipped in
 4691                  * its driver_flags and move on to the next entry.
 4692                  */
 4693                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
 4694                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
 4695                         entry = (ql_minidump_entry_t *) ((char *) entry
 4696                                         + entry->hdr.entry_size);
 4697                         continue;
 4698                 }
 4699 
 4700                 /*
 4701                  * This is only needed when the allocated capture buffer
 4702                  * is too small to hold all of the entries required by a
 4703                  * given capture mask.  Ideally the buffer contents would
 4704                  * be emptied to a file before processing the next entry.
 4705                  * Once the buffer would overflow, no further capture
 4706                  * happens and the remaining oversized entries are
 4707                  * skipped.
 4708                  */
 4709                 if (entry->hdr.entry_capture_size != 0) {
 4710                         if ((buff_level + entry->hdr.entry_capture_size) >
 4711                                 dump_size) {
 4712                                 /*  Try to recover by emptying buffer to file */
 4713                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
 4714                                 entry = (ql_minidump_entry_t *) ((char *) entry
 4715                                                 + entry->hdr.entry_size);
 4716                                 continue;
 4717                         }
 4718                 }
 4719 
 4720                 /*
 4721                  * Decode the entry type and process it accordingly
 4722                  */
 4723 
 4724                 switch (entry->hdr.entry_type) {
 4725                 case RDNOP:
 4726                         break;
 4727 
 4728                 case RDEND:
 4729                         sane_end++;
 4730                         break;
 4731 
 4732                 case RDCRB:
 4733                         dbuff = dump_buff + buff_level;
 4734                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
 4735                         ql_entry_err_chk(entry, esize);
 4736                         buff_level += esize;
 4737                         break;
 4738 
 4739                 case POLLRD:
 4740                         dbuff = dump_buff + buff_level;
 4741                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
 4742                         ql_entry_err_chk(entry, esize);
 4743                         buff_level += esize;
 4744                         break;
 4745 
 4746                 case POLLRDMWR:
 4747                         dbuff = dump_buff + buff_level;
 4748                         esize = ql_pollrd_modify_write(ha, (void *)entry,
 4749                                         (void *)dbuff);
 4750                         ql_entry_err_chk(entry, esize);
 4751                         buff_level += esize;
 4752                         break;
 4753 
 4754                 case L2ITG:
 4755                 case L2DTG:
 4756                 case L2DAT:
 4757                 case L2INS:
 4758                         dbuff = dump_buff + buff_level;
 4759                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
 4760                         if (esize == -1) {
 4761                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
 4762                         } else {
 4763                                 ql_entry_err_chk(entry, esize);
 4764                                 buff_level += esize;
 4765                         }
 4766                         break;
 4767 
 4768                 case L1DAT:
 4769                 case L1INS:
 4770                         dbuff = dump_buff + buff_level;
 4771                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
 4772                         ql_entry_err_chk(entry, esize);
 4773                         buff_level += esize;
 4774                         break;
 4775 
 4776                 case RDOCM:
 4777                         dbuff = dump_buff + buff_level;
 4778                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
 4779                         ql_entry_err_chk(entry, esize);
 4780                         buff_level += esize;
 4781                         break;
 4782 
 4783                 case RDMEM:
 4784                         dbuff = dump_buff + buff_level;
 4785                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
 4786                         ql_entry_err_chk(entry, esize);
 4787                         buff_level += esize;
 4788                         break;
 4789 
 4790                 case BOARD:
 4791                 case RDROM:
 4792                         dbuff = dump_buff + buff_level;
 4793                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
 4794                         ql_entry_err_chk(entry, esize);
 4795                         buff_level += esize;
 4796                         break;
 4797 
 4798                 case RDMUX:
 4799                         dbuff = dump_buff + buff_level;
 4800                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
 4801                         ql_entry_err_chk(entry, esize);
 4802                         buff_level += esize;
 4803                         break;
 4804 
 4805                 case RDMUX2:
 4806                         dbuff = dump_buff + buff_level;
 4807                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
 4808                         ql_entry_err_chk(entry, esize);
 4809                         buff_level += esize;
 4810                         break;
 4811 
 4812                 case QUEUE:
 4813                         dbuff = dump_buff + buff_level;
 4814                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
 4815                         ql_entry_err_chk(entry, esize);
 4816                         buff_level += esize;
 4817                         break;
 4818 
 4819                 case CNTRL:
 4820                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
 4821                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
 4822                         }
 4823                         break;
 4824                 default:
 4825                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
 4826                         break;
 4827                 }
 4828                 /*  next entry in the template */
 4829                 entry = (ql_minidump_entry_t *) ((char *) entry
 4830                                                 + entry->hdr.entry_size);
 4831         }
 4832 
 4833         if (!sane_start || (sane_end > 1)) {
 4834                 device_printf(ha->pci_dev,
 4835                         "\n%s: Template configuration error. Check Template\n",
 4836                         __func__);
 4837         }
 4838 
 4839         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
 4840                 __func__, template_hdr->num_of_entries));
 4841 
 4842         return (0);
 4843 }
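
/*
 * Editor's sketch (illustrative, excluded from compilation): the template
 * parsed above is a flat sequence of variable-size entries, each beginning
 * with a header whose entry_size gives the byte offset to the next entry,
 * so the walk is a simple forward chain.  Minimal version, assuming a
 * hypothetical header holding just the fields used for the traversal:
 */
#if 0
struct md_entry_hdr {           /* hypothetical minimal header layout */
        uint32_t entry_type;
        uint32_t entry_size;    /* bytes from this entry to the next */
};

static const struct md_entry_hdr *
md_next_entry(const struct md_entry_hdr *e)
{
        return ((const struct md_entry_hdr *)
            ((const char *)e + e->entry_size));
}
#endif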
 4844 
 4845 /*
 4846  * Read CRB operation.
 4847  */
 4848 static uint32_t
 4849 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
 4850         uint32_t *data_buff)
 4851 {
 4852         int loop_cnt;
 4853         int ret;
 4854         uint32_t op_count, addr, stride, value = 0;
 4855 
 4856         addr = crb_entry->addr;
 4857         op_count = crb_entry->op_count;
 4858         stride = crb_entry->addr_stride;
 4859 
 4860         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
 4861                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
 4862 
 4863                 if (ret)
 4864                         return (0);
 4865 
 4866                 *data_buff++ = addr;
 4867                 *data_buff++ = value;
 4868                 addr = addr + stride;
 4869         }
 4870 
 4871         /*
 4872          * Return the number of bytes written into the capture buffer.
 4873          */
 4874         return (op_count * (2 * sizeof(uint32_t)));
 4875 }
 4876 
 4877 /*
 4878  * Handle L2 Cache.
 4879  */
 4880 
 4881 static uint32_t 
 4882 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
 4883         uint32_t *data_buff)
 4884 {
 4885         int i, k;
 4886         int loop_cnt;
 4887         int ret;
 4888 
 4889         uint32_t read_value;
 4890         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
 4891         uint32_t tag_value, read_cnt;
 4892         volatile uint8_t cntl_value_r;
 4893         long timeout;
 4894         uint32_t data;
 4895 
 4896         loop_cnt = cacheEntry->op_count;
 4897 
 4898         read_addr = cacheEntry->read_addr;
 4899         cntrl_addr = cacheEntry->control_addr;
 4900         cntl_value_w = (uint32_t) cacheEntry->write_value;
 4901 
 4902         tag_reg_addr = cacheEntry->tag_reg_addr;
 4903 
 4904         tag_value = cacheEntry->init_tag_value;
 4905         read_cnt = cacheEntry->read_addr_cnt;
 4906 
 4907         for (i = 0; i < loop_cnt; i++) {
 4908                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
 4909                 if (ret)
 4910                         return (0);
 4911 
 4912                 if (cacheEntry->write_value != 0) {
 4914                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
 4915                                         &cntl_value_w, 0);
 4916                         if (ret)
 4917                                 return (0);
 4918                 }
 4919 
 4920                 if (cacheEntry->poll_mask != 0) {
 4922                         timeout = cacheEntry->poll_wait;
 4923 
 4924                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
 4925                         if (ret)
 4926                                 return (0);
 4927 
 4928                         cntl_value_r = (uint8_t)data;
 4929 
 4930                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
 4931                                 if (timeout) {
 4932                                         qla_mdelay(__func__, 1);
 4933                                         timeout--;
 4934                                 } else
 4935                                         break;
 4936 
 4937                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
 4938                                                 &data, 1);
 4939                                 if (ret)
 4940                                         return (0);
 4941 
 4942                                 cntl_value_r = (uint8_t)data;
 4943                         }
 4944                         if (!timeout) {
 4945                                 /*
 4946                                  * Poll timed out, so the capture for this
 4947                                  * entry failed.  The caller skips the
 4948                                  * remaining reads; driver-specific fields
 4949                                  * in the template header can be used to
 4950                                  * report the error.
 4951                                  */
 4952                                 return (-1);
 4953                         }
 4954                 }
 4955 
 4956                 addr = read_addr;
 4957                 for (k = 0; k < read_cnt; k++) {
 4958                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 4959                         if (ret)
 4960                                 return (0);
 4961 
 4962                         *data_buff++ = read_value;
 4963                         addr += cacheEntry->read_addr_stride;
 4964                 }
 4965 
 4966                 tag_value += cacheEntry->tag_value_stride;
 4967         }
 4968 
 4969         return (read_cnt * loop_cnt * sizeof(uint32_t));
 4970 }
 4971 
 4972 /*
 4973  * Handle L1 Cache.
 4974  */
 4975 
 4976 static uint32_t 
 4977 ql_L1Cache(qla_host_t *ha,
 4978         ql_minidump_entry_cache_t *cacheEntry,
 4979         uint32_t *data_buff)
 4980 {
 4981         int ret;
 4982         int i, k;
 4983         int loop_cnt;
 4984 
 4985         uint32_t read_value;
 4986         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
 4987         uint32_t tag_value, read_cnt;
 4988         uint32_t cntl_value_w;
 4989 
 4990         loop_cnt = cacheEntry->op_count;
 4991 
 4992         read_addr = cacheEntry->read_addr;
 4993         cntrl_addr = cacheEntry->control_addr;
 4994         cntl_value_w = (uint32_t) cacheEntry->write_value;
 4995 
 4996         tag_reg_addr = cacheEntry->tag_reg_addr;
 4997 
 4998         tag_value = cacheEntry->init_tag_value;
 4999         read_cnt = cacheEntry->read_addr_cnt;
 5000 
 5001         for (i = 0; i < loop_cnt; i++) {
 5002                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
 5003                 if (ret)
 5004                         return (0);
 5005 
 5006                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
 5007                 if (ret)
 5008                         return (0);
 5009 
 5010                 addr = read_addr;
 5011                 for (k = 0; k < read_cnt; k++) {
 5012                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 5013                         if (ret)
 5014                                 return (0);
 5015 
 5016                         *data_buff++ = read_value;
 5017                         addr += cacheEntry->read_addr_stride;
 5018                 }
 5019 
 5020                 tag_value += cacheEntry->tag_value_stride;
 5021         }
 5022 
 5023         return (read_cnt * loop_cnt * sizeof(uint32_t));
 5024 }
 5025 
 5026 /*
 5027  * Reading OCM memory
 5028  */
 5029 
 5030 static uint32_t 
 5031 ql_rdocm(qla_host_t *ha,
 5032         ql_minidump_entry_rdocm_t *ocmEntry,
 5033         uint32_t *data_buff)
 5034 {
 5035         int i, loop_cnt;
 5036         volatile uint32_t addr;
 5037         volatile uint32_t value;
 5038 
 5039         addr = ocmEntry->read_addr;
 5040         loop_cnt = ocmEntry->op_count;
 5041 
 5042         for (i = 0; i < loop_cnt; i++) {
 5043                 value = READ_REG32(ha, addr);
 5044                 *data_buff++ = value;
 5045                 addr += ocmEntry->read_addr_stride;
 5046         }
 5047         return (loop_cnt * sizeof(value));
 5048 }
 5049 
 5050 /*
 5051  * Read memory
 5052  */
 5053 
 5054 static uint32_t 
 5055 ql_rdmem(qla_host_t *ha,
 5056         ql_minidump_entry_rdmem_t *mem_entry,
 5057         uint32_t *data_buff)
 5058 {
 5059         int ret;
 5060         int i, loop_cnt;
 5061         volatile uint32_t addr;
 5062         q80_offchip_mem_val_t val;
 5063 
 5064         addr = mem_entry->read_addr;
 5065 
 5066         /* loop count = size in bytes / 16; each read returns 16 bytes */
 5067         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
 5068 
 5069         for (i = 0; i < loop_cnt; i++) {
 5070                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
 5071                 if (ret)
 5072                         return (0);
 5073 
 5074                 *data_buff++ = val.data_lo;
 5075                 *data_buff++ = val.data_hi;
 5076                 *data_buff++ = val.data_ulo;
 5077                 *data_buff++ = val.data_uhi;
 5078 
 5079                 addr += (sizeof(uint32_t) * 4);
 5080         }
 5081 
 5082         return (loop_cnt * (sizeof(uint32_t) * 4));
 5083 }
 5084 
 5085 /*
 5086  * Read Rom
 5087  */
 5088 
 5089 static uint32_t 
 5090 ql_rdrom(qla_host_t *ha,
 5091         ql_minidump_entry_rdrom_t *romEntry,
 5092         uint32_t *data_buff)
 5093 {
 5094         int ret;
 5095         int i, loop_cnt;
 5096         uint32_t addr;
 5097         uint32_t value;
 5098 
 5099         addr = romEntry->read_addr;
 5100         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
 5101         loop_cnt /= sizeof(value);
 5102 
 5103         for (i = 0; i < loop_cnt; i++) {
 5104                 ret = ql_rd_flash32(ha, addr, &value);
 5105                 if (ret)
 5106                         return (0);
 5107 
 5108                 *data_buff++ = value;
 5109                 addr += sizeof(value);
 5110         }
 5111 
 5112         return (loop_cnt * sizeof(value));
 5113 }
 5114 
 5115 /*
 5116  * Read MUX data
 5117  */
 5118 
 5119 static uint32_t 
 5120 ql_rdmux(qla_host_t *ha,
 5121         ql_minidump_entry_mux_t *muxEntry,
 5122         uint32_t *data_buff)
 5123 {
 5124         int ret;
 5125         int loop_cnt;
 5126         uint32_t read_value, sel_value;
 5127         uint32_t read_addr, select_addr;
 5128 
 5129         select_addr = muxEntry->select_addr;
 5130         sel_value = muxEntry->select_value;
 5131         read_addr = muxEntry->read_addr;
 5132 
 5133         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
 5134                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
 5135                 if (ret)
 5136                         return (0);
 5137 
 5138                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
 5139                 if (ret)
 5140                         return (0);
 5141 
 5142                 *data_buff++ = sel_value;
 5143                 *data_buff++ = read_value;
 5144 
 5145                 sel_value += muxEntry->select_value_stride;
 5146         }
 5147 
 5148         return (loop_cnt * (2 * sizeof(uint32_t)));
 5149 }
 5150 
 5151 static uint32_t
 5152 ql_rdmux2(qla_host_t *ha,
 5153         ql_minidump_entry_mux2_t *muxEntry,
 5154         uint32_t *data_buff)
 5155 {
 5156         int ret;
 5157         int loop_cnt;
 5158 
 5159         uint32_t select_addr_1, select_addr_2;
 5160         uint32_t select_value_1, select_value_2;
 5161         uint32_t select_value_count, select_value_mask;
 5162         uint32_t read_addr, read_value;
 5163 
 5164         select_addr_1 = muxEntry->select_addr_1;
 5165         select_addr_2 = muxEntry->select_addr_2;
 5166         select_value_1 = muxEntry->select_value_1;
 5167         select_value_2 = muxEntry->select_value_2;
 5168         select_value_count = muxEntry->select_value_count;
 5169         select_value_mask  = muxEntry->select_value_mask;
 5170 
 5171         read_addr = muxEntry->read_addr;
 5172 
 5173         for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) {
 5174                 uint32_t temp_sel_val;
 5175 
 5176                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
 5177                 if (ret)
 5178                         return (0);
 5179 
 5180                 temp_sel_val = select_value_1 & select_value_mask;
 5181 
 5182                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
 5183                 if (ret)
 5184                         return (0);
 5185 
 5186                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
 5187                 if (ret)
 5188                         return (0);
 5189 
 5190                 *data_buff++ = temp_sel_val;
 5191                 *data_buff++ = read_value;
 5192 
 5193                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
 5194                 if (ret)
 5195                         return (0);
 5196 
 5197                 temp_sel_val = select_value_2 & select_value_mask;
 5198 
 5199                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
 5200                 if (ret)
 5201                         return (0);
 5202 
 5203                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
 5204                 if (ret)
 5205                         return (0);
 5206 
 5207                 *data_buff++ = temp_sel_val;
 5208                 *data_buff++ = read_value;
 5209 
 5210                 select_value_1 += muxEntry->select_value_stride;
 5211                 select_value_2 += muxEntry->select_value_stride;
 5212         }
 5213 
 5214         return (loop_cnt * (4 * sizeof(uint32_t)));
 5215 }
 5216 
 5217 /*
 5218  * Handling Queue State Reads.
 5219  */
 5220 
 5221 static uint32_t 
 5222 ql_rdqueue(qla_host_t *ha,
 5223         ql_minidump_entry_queue_t *queueEntry,
 5224         uint32_t *data_buff)
 5225 {
 5226         int ret;
 5227         int loop_cnt, k;
 5228         uint32_t read_value;
 5229         uint32_t read_addr, read_stride, select_addr;
 5230         uint32_t queue_id, read_cnt;
 5231 
 5232         read_cnt = queueEntry->read_addr_cnt;
 5233         read_stride = queueEntry->read_addr_stride;
 5234         select_addr = queueEntry->select_addr;
 5235 
 5236         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
 5237                 loop_cnt++) {
 5238                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
 5239                 if (ret)
 5240                         return (0);
 5241 
 5242                 read_addr = queueEntry->read_addr;
 5243 
 5244                 for (k = 0; k < read_cnt; k++) {
 5245                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
 5246                         if (ret)
 5247                                 return (0);
 5248 
 5249                         *data_buff++ = read_value;
 5250                         read_addr += read_stride;
 5251                 }
 5252 
 5253                 queue_id += queueEntry->queue_id_stride;
 5254         }
 5255 
 5256         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
 5257 }
 5258 
 5259 /*
 5260  * Handling control entries.
 5261  */
 5262 
 5263 static uint32_t 
 5264 ql_cntrl(qla_host_t *ha,
 5265         ql_minidump_template_hdr_t *template_hdr,
 5266         ql_minidump_entry_cntrl_t *crbEntry)
 5267 {
 5268         int ret;
 5269         int count;
 5270         uint32_t opcode, read_value, addr, entry_addr;
 5271         long timeout;
 5272 
 5273         entry_addr = crbEntry->addr;
 5274 
 5275         for (count = 0; count < crbEntry->op_count; count++) {
 5276                 opcode = crbEntry->opcode;
 5277 
 5278                 if (opcode & QL_DBG_OPCODE_WR) {
 5279                         ret = ql_rdwr_indreg32(ha, entry_addr,
 5280                                         &crbEntry->value_1, 0);
 5281                         if (ret)
 5282                                 return (0);
 5283 
 5284                         opcode &= ~QL_DBG_OPCODE_WR;
 5285                 }
 5286 
 5287                 if (opcode & QL_DBG_OPCODE_RW) {
 5288                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 5289                         if (ret)
 5290                                 return (0);
 5291 
 5292                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 5293                         if (ret)
 5294                                 return (0);
 5295 
 5296                         opcode &= ~QL_DBG_OPCODE_RW;
 5297                 }
 5298 
 5299                 if (opcode & QL_DBG_OPCODE_AND) {
 5300                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 5301                         if (ret)
 5302                                 return (0);
 5303 
 5304                         read_value &= crbEntry->value_2;
 5305                         opcode &= ~QL_DBG_OPCODE_AND;
 5306 
 5307                         if (opcode & QL_DBG_OPCODE_OR) {
 5308                                 read_value |= crbEntry->value_3;
 5309                                 opcode &= ~QL_DBG_OPCODE_OR;
 5310                         }
 5311 
 5312                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 5313                         if (ret)
 5314                                 return (0);
 5315                 }
 5316 
 5317                 if (opcode & QL_DBG_OPCODE_OR) {
 5318                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
 5319                         if (ret)
 5320                                 return (0);
 5321 
 5322                         read_value |= crbEntry->value_3;
 5323 
 5324                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
 5325                         if (ret)
 5326                                 return (0);
 5327 
 5328                         opcode &= ~QL_DBG_OPCODE_OR;
 5329                 }
 5330 
 5331                 if (opcode & QL_DBG_OPCODE_POLL) {
 5332                         opcode &= ~QL_DBG_OPCODE_POLL;
 5333                         timeout = crbEntry->poll_timeout;
 5334                         addr = entry_addr;
 5335 
 5336                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 5337                         if (ret)
 5338                                 return (0);
 5339 
 5340                         while ((read_value & crbEntry->value_2)
 5341                                 != crbEntry->value_1) {
 5342                                 if (timeout) {
 5343                                         qla_mdelay(__func__, 1);
 5344                                         timeout--;
 5345                                 } else
 5346                                         break;
 5347 
 5348                                 ret = ql_rdwr_indreg32(ha, addr,
 5349                                                 &read_value, 1);
 5350                                 if (ret)
 5351                                         return (0);
 5352                         }
 5353 
 5354                         if (!timeout) {
 5355                                 /*
 5356                                  * Poll timed out, so the capture for this
 5357                                  * entry failed.  The caller skips the
 5358                                  * entry; driver-specific fields in the
 5359                                  * template header can be used to report
 5360                                  * the error.
 5361                                  */
 5363                                 return (-1);
 5364                         }
 5365                 }
 5366 
 5367                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
 5368                         /*
 5369                          * decide which address to use.
 5370                          */
 5371                         if (crbEntry->state_index_a) {
 5372                                 addr = template_hdr->saved_state_array[
 5373                                                 crbEntry-> state_index_a];
 5374                         } else {
 5375                                 addr = entry_addr;
 5376                         }
 5377 
 5378                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
 5379                         if (ret)
 5380                                 return (0);
 5381 
 5382                         template_hdr->saved_state_array[crbEntry->state_index_v]
 5383                                         = read_value;
 5384                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
 5385                 }
 5386 
 5387                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
 5388                         /*
 5389                          * decide which value to use.
 5390                          */
 5391                         if (crbEntry->state_index_v) {
 5392                                 read_value = template_hdr->saved_state_array[
 5393                                                 crbEntry->state_index_v];
 5394                         } else {
 5395                                 read_value = crbEntry->value_1;
 5396                         }
 5397                         /*
 5398                          * decide which address to use.
 5399                          */
 5400                         if (crbEntry->state_index_a) {
 5401                                 addr = template_hdr->saved_state_array[
 5402                                                 crbEntry-> state_index_a];
 5403                         } else {
 5404                                 addr = entry_addr;
 5405                         }
 5406 
 5407                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
 5408                         if (ret)
 5409                                 return (0);
 5410 
 5411                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
 5412                 }
 5413 
 5414                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
 5415                         /*  Read value from saved state using index */
 5416                         read_value = template_hdr->saved_state_array[
 5417                                                 crbEntry->state_index_v];
 5418 
 5419                         read_value <<= crbEntry->shl; /*Shift left operation */
 5420                         read_value >>= crbEntry->shr; /*Shift right operation */
 5421 
 5422                         if (crbEntry->value_2) {
 5423                                 /* check if AND mask is provided */
 5424                                 read_value &= crbEntry->value_2;
 5425                         }
 5426 
 5427                         read_value |= crbEntry->value_3; /* OR operation */
 5428                         read_value += crbEntry->value_1; /* increment op */
 5429 
 5430                         /* Write value back to state area. */
 5431 
 5432                         template_hdr->saved_state_array[crbEntry->state_index_v]
 5433                                         = read_value;
 5434                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
 5435                 }
 5436 
 5437                 entry_addr += crbEntry->addr_stride;
 5438         }
 5439 
 5440         return (0);
 5441 }
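
/*
 * Editor's note: ql_cntrl() treats the opcode as a bit mask and clears each
 * bit as it is handled, so one template entry can combine operations.  A
 * worked example with a hypothetical entry (excluded from compilation): an
 * opcode of (QL_DBG_OPCODE_WR | QL_DBG_OPCODE_POLL) first writes value_1 to
 * the register, then polls until (reg & value_2) == value_1 or poll_timeout
 * 1ms delays elapse -- exactly the order of the if-chain above.
 */
#if 0
        /* hypothetical template entry exercising two opcode bits */
        crbEntry->opcode = QL_DBG_OPCODE_WR | QL_DBG_OPCODE_POLL;
        crbEntry->value_1 = 0x1;        /* value written, then polled for */
        crbEntry->value_2 = 0x1;        /* poll mask */
        crbEntry->poll_timeout = 100;   /* up to 100 x 1ms delays */
#endif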
 5442 
 5443 /*
 5444  * Handling rd poll entry.
 5445  */
 5446 
 5447 static uint32_t 
 5448 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
 5449         uint32_t *data_buff)
 5450 {
 5451         int ret;
 5452         int loop_cnt;
 5453         uint32_t op_count, select_addr, select_value_stride, select_value;
 5454         uint32_t read_addr, poll, mask, data;
 5455         uint32_t wait_count = 0;
 5456 
 5457         select_addr            = entry->select_addr;
 5458         read_addr              = entry->read_addr;
 5459         select_value           = entry->select_value;
 5460         select_value_stride    = entry->select_value_stride;
 5461         op_count               = entry->op_count;
 5462         poll                   = entry->poll;
 5463         mask                   = entry->mask;
 5464 
 5465         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
 5466                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
 5467                 if (ret)
 5468                         return (0);
 5469 
 5470                 wait_count = 0;
 5471 
 5472                 while (wait_count < poll) {
 5473                         uint32_t temp;
 5474 
 5475                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
 5476                         if (ret)
 5477                                 return (0);
 5478 
 5479                         if ((temp & mask) != 0) {
 5480                                 break;
 5481                         }
 5482                         wait_count++;
 5483                 }
 5484 
 5485                 if (wait_count == poll) {
 5486                         device_printf(ha->pci_dev,
 5487                                 "%s: Error in processing entry\n", __func__);
 5488                         device_printf(ha->pci_dev,
 5489                                 "%s: wait_count <0x%x> poll <0x%x>\n",
 5490                                 __func__, wait_count, poll);
 5491                         return (0);
 5492                 }
 5493 
 5494                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
 5495                 if (ret)
 5496                         return (0);
 5497 
 5498                 *data_buff++ = select_value;
 5499                 *data_buff++ = data;
 5500                 select_value = select_value + select_value_stride;
 5501         }
 5502 
 5503         /*
 5504          * Return the number of bytes written into the capture buffer.
 5505          */
 5506         return (loop_cnt * (2 * sizeof(uint32_t)));
 5507 }
 5508 
 5509 /*
 5510  * Handling rd modify write poll entry.
 5511  */
 5512 
 5513 static uint32_t 
 5514 ql_pollrd_modify_write(qla_host_t *ha,
 5515         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
 5516         uint32_t *data_buff)
 5517 {
 5518         int ret;
 5519         uint32_t addr_1, addr_2, value_1, value_2, data;
 5520         uint32_t poll, mask, modify_mask;
 5521         uint32_t wait_count = 0;
 5522 
 5523         addr_1          = entry->addr_1;
 5524         addr_2          = entry->addr_2;
 5525         value_1         = entry->value_1;
 5526         value_2         = entry->value_2;
 5527 
 5528         poll            = entry->poll;
 5529         mask            = entry->mask;
 5530         modify_mask     = entry->modify_mask;
 5531 
 5532         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
 5533         if (ret)
 5534                 return (0);
 5535 
 5536         wait_count = 0;
 5537         while (wait_count < poll) {
 5538                 uint32_t temp;
 5539 
 5540                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
 5541                 if (ret)
 5542                         return (0);
 5543 
 5544                 if ((temp & mask) != 0) {
 5545                         break;
 5546                 }
 5547                 wait_count++;
 5548         }
 5549 
 5550         if (wait_count == poll) {
 5551                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
 5552                         __func__);
 5553         } else {
 5554                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
 5555                 if (ret)
 5556                         return (0);
 5557 
 5558                 data = (data & modify_mask);
 5559 
 5560                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
 5561                 if (ret)
 5562                         return (0);
 5563 
 5564                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
 5565                 if (ret)
 5566                         return (0);
 5567 
 5568                 /* Poll again */
 5569                 wait_count = 0;
 5570                 while (wait_count < poll) {
 5571                         uint32_t temp;
 5572 
 5573                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
 5574                         if (ret)
 5575                                 return (0);
 5576 
 5577                         if ((temp & mask) != 0) {
 5578                                 break;
 5579                         }
 5580                         wait_count++;
 5581                 }
 5582                 *data_buff++ = addr_2;
 5583                 *data_buff++ = data;
 5584         }
 5585 
 5586         /*
 5587          * Return the number of bytes written into the capture buffer.
 5588          */
 5589         return (2 * sizeof(uint32_t));
 5590 }
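
/*
 * Editor's sketch (illustrative, excluded from compilation): the bounded
 * poll repeated in ql_pollrd() and ql_pollrd_modify_write() above, factored
 * into a hypothetical helper around the driver's ql_rdwr_indreg32()
 * indirect register accessor:
 */
#if 0
static int
qla_poll_for_mask(qla_host_t *ha, uint32_t addr, uint32_t mask,
        uint32_t poll)
{
        uint32_t wait_count, temp;

        for (wait_count = 0; wait_count < poll; wait_count++) {
                if (ql_rdwr_indreg32(ha, addr, &temp, 1))
                        return (-1);            /* register access failed */
                if ((temp & mask) != 0)
                        return (0);             /* condition satisfied */
        }
        return (-1);                            /* timed out */
}
#endif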
