FreeBSD/Linux Kernel Cross Reference
sys/dev/mrsas/mrsas.c


    1 /*
    2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
    3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
    4  * Support: freebsdraid@avagotech.com
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions are
    8  * met:
    9  *
   10  * 1. Redistributions of source code must retain the above copyright notice,
   11  * this list of conditions and the following disclaimer. 2. Redistributions
   12  * in binary form must reproduce the above copyright notice, this list of
   13  * conditions and the following disclaimer in the documentation and/or other
   14  * materials provided with the distribution. 3. Neither the name of the
   15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
   16  * promote products derived from this software without specific prior written
   17  * permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
   23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  *
   31  * The views and conclusions contained in the software and documentation are
   32  * those of the authors and should not be interpreted as representing
   33  * official policies, either expressed or implied, of the FreeBSD Project.
   34  *
   35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
   36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
   37  *
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD$");
   42 
   43 #include <dev/mrsas/mrsas.h>
   44 #include <dev/mrsas/mrsas_ioctl.h>
   45 
   46 #include <cam/cam.h>
   47 #include <cam/cam_ccb.h>
   48 
   49 #include <sys/sysctl.h>
   50 #include <sys/types.h>
   51 #include <sys/sysent.h>
   52 #include <sys/kthread.h>
   53 #include <sys/taskqueue.h>
   54 #include <sys/smp.h>
   55 #include <sys/endian.h>
   56 
   57 /*
   58  * Function prototypes
   59  */
   60 static d_open_t mrsas_open;
   61 static d_close_t mrsas_close;
   62 static d_ioctl_t mrsas_ioctl;
   63 static d_poll_t mrsas_poll;
   64 
   65 static void mrsas_ich_startup(void *arg);
   66 static struct mrsas_mgmt_info mrsas_mgmt_info;
   67 static struct mrsas_ident *mrsas_find_ident(device_t);
   68 static int mrsas_setup_msix(struct mrsas_softc *sc);
   69 static int mrsas_allocate_msix(struct mrsas_softc *sc);
   70 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
   71 static void mrsas_flush_cache(struct mrsas_softc *sc);
   72 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
   73 static void mrsas_ocr_thread(void *arg);
   74 static int mrsas_get_map_info(struct mrsas_softc *sc);
   75 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
   76 static int mrsas_sync_map_info(struct mrsas_softc *sc);
   77 static int mrsas_get_pd_list(struct mrsas_softc *sc);
   78 static int mrsas_get_ld_list(struct mrsas_softc *sc);
   79 static int mrsas_setup_irq(struct mrsas_softc *sc);
   80 static int mrsas_alloc_mem(struct mrsas_softc *sc);
   81 static int mrsas_init_fw(struct mrsas_softc *sc);
   82 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
   83 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
   84 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
   85 static int mrsas_clear_intr(struct mrsas_softc *sc);
   86 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
   87 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
   88 static int
   89 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
   90     struct mrsas_mfi_cmd *cmd_to_abort);
   91 static void
   92 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
   93 static struct mrsas_softc *
   94 mrsas_get_softc_instance(struct cdev *dev,
   95     u_long cmd, caddr_t arg);
   96 u_int32_t
   97 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
   98 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
   99 u_int8_t
  100 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
  101     struct mrsas_mfi_cmd *mfi_cmd);
  102 void    mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
  103 int     mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
  104 int     mrsas_init_adapter(struct mrsas_softc *sc);
  105 int     mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
  106 int     mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
  107 int     mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
  108 int     mrsas_ioc_init(struct mrsas_softc *sc);
  109 int     mrsas_bus_scan(struct mrsas_softc *sc);
  110 int     mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  111 int     mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  112 int     mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
  113 int     mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
  114 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
  115 int mrsas_reset_targets(struct mrsas_softc *sc);
  116 int
  117 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
  118     struct mrsas_mfi_cmd *cmd);
  119 int
  120 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
  121     int size);
  122 void    mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
  123 void    mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  124 void    mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  125 void    mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  126 void    mrsas_disable_intr(struct mrsas_softc *sc);
  127 void    mrsas_enable_intr(struct mrsas_softc *sc);
  128 void    mrsas_free_ioc_cmd(struct mrsas_softc *sc);
  129 void    mrsas_free_mem(struct mrsas_softc *sc);
  130 void    mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
  131 void    mrsas_isr(void *arg);
  132 void    mrsas_teardown_intr(struct mrsas_softc *sc);
  133 void    mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
  134 void    mrsas_kill_hba(struct mrsas_softc *sc);
  135 void    mrsas_aen_handler(struct mrsas_softc *sc);
  136 void
  137 mrsas_write_reg(struct mrsas_softc *sc, int offset,
  138     u_int32_t value);
  139 void
  140 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
  141     u_int32_t req_desc_hi);
  142 void    mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
  143 void
  144 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
  145     struct mrsas_mfi_cmd *cmd, u_int8_t status);
  146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
  147 
  148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
  149         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  150 
  151 extern int mrsas_cam_attach(struct mrsas_softc *sc);
  152 extern void mrsas_cam_detach(struct mrsas_softc *sc);
  153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
  154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
  155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
  156 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
  157 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
  158 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
  159 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
  160 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
  161 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
  162 extern void mrsas_xpt_release(struct mrsas_softc *sc);
  163 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
  164 mrsas_get_request_desc(struct mrsas_softc *sc,
  165     u_int16_t index);
  166 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
  167 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
  168 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
  169 void    mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
  170 
  171 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
  172         union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
  173         u_int32_t data_length, u_int8_t *sense);
  174 void
  175 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
  176     u_int32_t req_desc_hi);
  177 
  178 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
  179     "MRSAS Driver Parameters");
  180 
  181 /*
  182  * PCI device struct and table
  183  *
  184  */
  185 typedef struct mrsas_ident {
  186         uint16_t vendor;
  187         uint16_t device;
  188         uint16_t subvendor;
  189         uint16_t subdevice;
  190         const char *desc;
  191 }       MRSAS_CTLR_ID;
  192 
  193 MRSAS_CTLR_ID device_table[] = {
  194         {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
  195         {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
  196         {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
  197         {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
  198         {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
  199         {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
  200         {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
  201         {0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
  202         {0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
  203         {0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
  204         {0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
  205         {0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
  206         {0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
  207         {0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
  208         {0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
  209         {0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
  210         {0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
  211         {0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
  212         {0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
  213         {0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
  214         {0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
  215         {0, 0, 0, 0, NULL}
  216 };
  217 
  218 /*
  219  * Character device entry points
  220  *
  221  */
  222 static struct cdevsw mrsas_cdevsw = {
  223         .d_version = D_VERSION,
  224         .d_open = mrsas_open,
  225         .d_close = mrsas_close,
  226         .d_ioctl = mrsas_ioctl,
  227         .d_poll = mrsas_poll,
  228         .d_name = "mrsas",
  229 };
  230 
  231 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
  232 
  233 int
  234 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
  235 {
  236 
  237         return (0);
  238 }
  239 
  240 int
  241 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
  242 {
  243 
  244         return (0);
  245 }
  246 
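      /*
       * mrsas_read_reg_with_retries:  Register read wrapper
       * input:                        Adapter soft state, register offset
       *
       * On Aero controllers the read is repeated up to three times while it
       * returns zero (presumably to tolerate transient all-zero reads on
       * those parts); on all other controllers a single read is performed.
       */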
  247 u_int32_t
  248 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
  249 {
  250         u_int32_t i = 0, ret_val;
  251 
  252         if (sc->is_aero) {
  253                 do {
  254                         ret_val = mrsas_read_reg(sc, offset);
  255                         i++;
  256                 } while(ret_val == 0 && i < 3);
  257         } else
  258                 ret_val = mrsas_read_reg(sc, offset);
  259 
  260         return ret_val;
  261 }
  262 
  263 /*
  264  * Register Read/Write Functions
  265  *
  266  */
  267 void
  268 mrsas_write_reg(struct mrsas_softc *sc, int offset,
  269     u_int32_t value)
  270 {
  271         bus_space_tag_t bus_tag = sc->bus_tag;
  272         bus_space_handle_t bus_handle = sc->bus_handle;
  273 
  274         bus_space_write_4(bus_tag, bus_handle, offset, value);
  275 }
  276 
  277 u_int32_t
  278 mrsas_read_reg(struct mrsas_softc *sc, int offset)
  279 {
  280         bus_space_tag_t bus_tag = sc->bus_tag;
  281         bus_space_handle_t bus_handle = sc->bus_handle;
  282 
  283         return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
  284 }
  285 
  286 /*
  287  * Interrupt Disable/Enable/Clear Functions
  288  *
  289  */
  290 void
  291 mrsas_disable_intr(struct mrsas_softc *sc)
  292 {
  293         u_int32_t mask = 0xFFFFFFFF;
  294 
  295         sc->mask_interrupts = 1;
  296         mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
  297         /* Dummy read to force pci flush */
  298         (void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
  299 }
  300 
  301 void
  302 mrsas_enable_intr(struct mrsas_softc *sc)
  303 {
  304         u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
  305 
  306         sc->mask_interrupts = 0;
  307         mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
  308         (void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
  309 
  310         mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
  311         (void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
  312 }
  313 
  314 static int
  315 mrsas_clear_intr(struct mrsas_softc *sc)
  316 {
  317         u_int32_t status;
  318 
  319         /* Read received interrupt */
  320         status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
  321 
  322         /* Not our interrupt, so just return */
  323         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
  324                 return (0);
  325 
  326         /* We got a reply interrupt */
  327         return (1);
  328 }
  329 
  330 /*
  331  * PCI Support Functions
  332  *
  333  */
  334 static struct mrsas_ident *
  335 mrsas_find_ident(device_t dev)
  336 {
  337         struct mrsas_ident *pci_device;
  338 
  339         for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
  340                 if ((pci_device->vendor == pci_get_vendor(dev)) &&
  341                     (pci_device->device == pci_get_device(dev)) &&
  342                     ((pci_device->subvendor == pci_get_subvendor(dev)) ||
  343                     (pci_device->subvendor == 0xffff)) &&
  344                     ((pci_device->subdevice == pci_get_subdevice(dev)) ||
  345                     (pci_device->subdevice == 0xffff)))
  346                         return (pci_device);
  347         }
  348         return (NULL);
  349 }
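
      /*
       * In mrsas_find_ident() above, a subvendor or subdevice value of
       * 0xffff in device_table acts as a wildcard, so the entries listed
       * earlier match any subsystem IDs for the given vendor/device pair.
       */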
  350 
  351 static int
  352 mrsas_probe(device_t dev)
  353 {
  354         static u_int8_t first_ctrl = 1;
  355         struct mrsas_ident *id;
  356 
  357         if ((id = mrsas_find_ident(dev)) != NULL) {
  358                 if (first_ctrl) {
  359                         printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
  360                             MRSAS_VERSION);
  361                         first_ctrl = 0;
  362                 }
  363                 device_set_desc(dev, id->desc);
  364                 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
  365                 return (-30);
  366         }
  367         return (ENXIO);
  368 }
  369 
  370 /*
  371  * mrsas_setup_sysctl:  setup sysctl values for mrsas
  372  * input:                               Adapter instance soft state
  373  *
  374  * Sets up sysctl entries for the mrsas driver.
  375  */
  376 static void
  377 mrsas_setup_sysctl(struct mrsas_softc *sc)
  378 {
  379         struct sysctl_ctx_list *sysctl_ctx = NULL;
  380         struct sysctl_oid *sysctl_tree = NULL;
  381         char tmpstr[80], tmpstr2[80];
  382 
  383         /*
  384          * Setup the sysctl variable so the user can change the debug level
  385          * on the fly.
  386          */
  387         snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
  388             device_get_unit(sc->mrsas_dev));
  389         snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
  390 
  391         sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
  392         if (sysctl_ctx != NULL)
  393                 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
  394 
  395         if (sysctl_tree == NULL) {
  396                 sysctl_ctx_init(&sc->sysctl_ctx);
  397                 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
  398                     SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
  399                     CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
  400                 if (sc->sysctl_tree == NULL)
  401                         return;
  402                 sysctl_ctx = &sc->sysctl_ctx;
  403                 sysctl_tree = sc->sysctl_tree;
  404         }
  405         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  406             OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
  407             "Disable the use of OCR");
  408 
  409         SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  410             OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
  411             strlen(MRSAS_VERSION), "driver version");
  412 
  413         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  414             OID_AUTO, "reset_count", CTLFLAG_RD,
  415             &sc->reset_count, 0, "number of ocr from start of the day");
  416 
  417         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  418             OID_AUTO, "fw_outstanding", CTLFLAG_RD,
  419             &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
  420 
  421         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  422             OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
  423             &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
  424 
  425         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  426             OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
  427             "Driver debug level");
  428 
  429         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  430             OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
  431             0, "Driver IO timeout value in milliseconds.");
  432 
  433         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  434             OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
  435             &sc->mrsas_fw_fault_check_delay,
  436             0, "FW fault check thread delay in seconds. <default is 1 sec>");
  437 
  438         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  439             OID_AUTO, "reset_in_progress", CTLFLAG_RD,
  440             &sc->reset_in_progress, 0, "ocr in progress status");
  441 
  442         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  443             OID_AUTO, "block_sync_cache", CTLFLAG_RW,
  444             &sc->block_sync_cache, 0,
  445             "Block SYNC CACHE at driver. <default: 0, send it to FW>");
  446         SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  447             OID_AUTO, "stream detection", CTLFLAG_RW,
  448                 &sc->drv_stream_detection, 0,
  449                 "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
  450         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  451             OID_AUTO, "prp_count", CTLFLAG_RD,
  452             &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
  453         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
  454             OID_AUTO, "SGE holes", CTLFLAG_RD,
  455             &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
  456 }
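
      /*
       * Illustrative usage of the nodes created above (unit 0 and the
       * values are examples only).  With the tree rooted at the device's
       * sysctl tree they typically appear as:
       *
       *   sysctl dev.mrsas.0.mrsas_debug
       *   sysctl dev.mrsas.0.fw_outstanding
       *   sysctl dev.mrsas.0.disable_ocr=1
       *
       * with hw.mrsas.<unit> used instead when the fallback tree is built.
       */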
  457 
  458 /*
  459  * mrsas_get_tunables:  get tunable parameters.
  460  * input:                               Adapter instance soft state
  461  *
  462  * Get tunable parameters. These help with debugging the driver at boot time.
  463  */
  464 static void
  465 mrsas_get_tunables(struct mrsas_softc *sc)
  466 {
  467         char tmpstr[80];
  468 
  469         /* XXX default to some debugging for now */
  470         sc->mrsas_debug =
  471                 (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
  472         sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
  473         sc->mrsas_fw_fault_check_delay = 1;
  474         sc->reset_count = 0;
  475         sc->reset_in_progress = 0;
  476         sc->block_sync_cache = 0;
  477         sc->drv_stream_detection = 1;
  478 
  479         /*
  480          * Grab the global variables.
  481          */
  482         TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
  483 
  484         /*
  485          * Grab the global variables.
  486          */
  487         TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
  488 
  489         /* Grab the unit-instance variables */
  490         snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
  491             device_get_unit(sc->mrsas_dev));
  492         TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
  493 }
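
      /*
       * Illustrative loader.conf(5) entries for the tunables fetched above
       * (the values shown are examples, not recommendations):
       *
       *   hw.mrsas.debug_level="0"
       *   hw.mrsas.lb_pending_cmds="4"
       *   dev.mrsas.0.debug_level="0"
       */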
  494 
  495 /*
  496  * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
  497  * Used to get the sequence number at driver load time.
  498  * input:               Adapter soft state
  499  *
  500  * Allocates DMAable memory for the event log info internal command.
  501  */
  502 int
  503 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
  504 {
  505         int el_info_size;
  506 
  507         /* Allocate get event log info command */
  508         el_info_size = sizeof(struct mrsas_evt_log_info);
  509         if (bus_dma_tag_create(sc->mrsas_parent_tag,
  510             1, 0,
  511             BUS_SPACE_MAXADDR_32BIT,
  512             BUS_SPACE_MAXADDR,
  513             NULL, NULL,
  514             el_info_size,
  515             1,
  516             el_info_size,
  517             BUS_DMA_ALLOCNOW,
  518             NULL, NULL,
  519             &sc->el_info_tag)) {
  520                 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
  521                 return (ENOMEM);
  522         }
  523         if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
  524             BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
  525                 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
  526                 return (ENOMEM);
  527         }
  528         if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
  529             sc->el_info_mem, el_info_size, mrsas_addr_cb,
  530             &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
  531                 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
  532                 return (ENOMEM);
  533         }
  534         memset(sc->el_info_mem, 0, el_info_size);
  535         return (0);
  536 }
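
      /*
       * The allocation above follows the usual busdma(9) sequence used
       * throughout this driver: create a DMA tag sized for the object,
       * allocate DMA-safe memory against that tag, then load the map so
       * that the mrsas_addr_cb() callback can record the bus address
       * (here into sc->el_info_phys_addr).
       */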
  537 
  538 /*
  539  * mrsas_free_evt_log_info_cmd:  Free memory for the event log info command
  540  * input:                                       Adapter soft state
  541  *
  542  * Deallocates memory for the event log info internal command.
  543  */
  544 void
  545 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
  546 {
  547         if (sc->el_info_phys_addr)
  548                 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
  549         if (sc->el_info_mem != NULL)
  550                 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
  551         if (sc->el_info_tag != NULL)
  552                 bus_dma_tag_destroy(sc->el_info_tag);
  553 }
  554 
  555 /*
  556  *  mrsas_get_seq_num:  Get latest event sequence number
  557  *  @sc:                                Adapter soft state
  558  *  @eli:                               Firmware event log sequence number information.
  559  *
  560  * Firmware maintains a log of all events in a non-volatile area.
  561  * The driver gets the sequence number using the DCMD
  562  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
  563  */
  564 
  565 static int
  566 mrsas_get_seq_num(struct mrsas_softc *sc,
  567     struct mrsas_evt_log_info *eli)
  568 {
  569         struct mrsas_mfi_cmd *cmd;
  570         struct mrsas_dcmd_frame *dcmd;
  571         u_int8_t do_ocr = 1, retcode = 0;
  572 
  573         cmd = mrsas_get_mfi_cmd(sc);
  574 
  575         if (!cmd) {
  576                 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
  577                 return -ENOMEM;
  578         }
  579         dcmd = &cmd->frame->dcmd;
  580 
  581         if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
  582                 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
  583                 mrsas_release_mfi_cmd(cmd);
  584                 return -ENOMEM;
  585         }
  586         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  587 
  588         dcmd->cmd = MFI_CMD_DCMD;
  589         dcmd->cmd_status = 0x0;
  590         dcmd->sge_count = 1;
  591         dcmd->flags = htole16(MFI_FRAME_DIR_READ);
  592         dcmd->timeout = 0;
  593         dcmd->pad_0 = 0;
  594         dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
  595         dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
  596         dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
  597         dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));
  598 
  599         retcode = mrsas_issue_blocked_cmd(sc, cmd);
  600         if (retcode == ETIMEDOUT)
  601                 goto dcmd_timeout;
  602 
  603         do_ocr = 0;
  604         /*
  605          * Copy the data back into the caller's buffer
  606          */
  607         memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
  608         mrsas_free_evt_log_info_cmd(sc);
  609 
  610 dcmd_timeout:
  611         if (do_ocr)
  612                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
  613         else
  614                 mrsas_release_mfi_cmd(cmd);
  615 
  616         return retcode;
  617 }
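
      /*
       * mrsas_get_seq_num() above is a typical instance of the driver's
       * blocking-DCMD pattern: get a free MFI command, allocate a DMA
       * buffer for the result, fill in the DCMD frame (opcode, transfer
       * length and a single SGE pointing at the buffer), issue it with
       * mrsas_issue_blocked_cmd(), and copy the result back before
       * releasing the buffer and the command.  On a timeout the command
       * is left outstanding and a timed-out-DCMD OCR is requested instead.
       */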
  618 
  619 /*
  620  *  mrsas_register_aen:         Register for asynchronous event notification
  621  *  @sc:                        Adapter soft state
  622  *  @seq_num:                   Starting sequence number
  623  *  @class_locale:              Class of the event
  624  *
  625  *  This function subscribes to events beyond @seq_num
  626  *  of type @class_locale.
  627  *
  628  */
  629 static int
  630 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
  631     u_int32_t class_locale_word)
  632 {
  633         int ret_val;
  634         struct mrsas_mfi_cmd *cmd;
  635         struct mrsas_dcmd_frame *dcmd;
  636         union mrsas_evt_class_locale curr_aen;
  637         union mrsas_evt_class_locale prev_aen;
  638 
  639         /*
  640          * If there is an AEN already pending (aen_cmd), check whether the
  641          * class_locale of that pending AEN is inclusive of the new AEN
  642          * request we currently have. If it is, we don't have to do
  643          * anything; whichever events the current AEN request subscribes
  644          * to have already been subscribed to. If the old command is
  645          * _not_ inclusive, we have to abort it, form a class_locale that
  646          * is a superset of both the old and the new one, and re-issue
  647          * the registration to the FW.
  648          */
  649 
  650         curr_aen.word = class_locale_word;
  651 
  652         if (sc->aen_cmd) {
  653                 prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);
  654 
  655                 /*
  656                  * A class whose enum value is smaller is inclusive of all
  657                  * higher values. If a PROGRESS (= -1) was previously
  658                  * registered, then new registration requests for higher
  659                  * classes need not be sent to the FW; they are automatically
  660                  * included. Locale numbers don't have such a hierarchy; they
  661                  * are bitmap values.
  662                  */
  663                 if ((prev_aen.members.class <= curr_aen.members.class) &&
  664                     !((prev_aen.members.locale & curr_aen.members.locale) ^
  665                     curr_aen.members.locale)) {
  666                         /*
  667                          * Previously issued event registration includes
  668                          * current request. Nothing to do.
  669                          */
  670                         return 0;
  671                 } else {
  672                         curr_aen.members.locale |= prev_aen.members.locale;
  673 
  674                         if (prev_aen.members.class < curr_aen.members.class)
  675                                 curr_aen.members.class = prev_aen.members.class;
  676 
  677                         sc->aen_cmd->abort_aen = 1;
  678                         ret_val = mrsas_issue_blocked_abort_cmd(sc,
  679                             sc->aen_cmd);
  680 
  681                         if (ret_val) {
  682                                 printf("mrsas: Failed to abort previous AEN command\n");
  683                                 return ret_val;
  684                         } else
  685                                 sc->aen_cmd = NULL;
  686                 }
  687         }
  688         cmd = mrsas_get_mfi_cmd(sc);
  689         if (!cmd)
  690                 return ENOMEM;
  691 
  692         dcmd = &cmd->frame->dcmd;
  693 
  694         memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
  695 
  696         /*
  697          * Prepare DCMD for aen registration
  698          */
  699         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  700 
  701         dcmd->cmd = MFI_CMD_DCMD;
  702         dcmd->cmd_status = 0x0;
  703         dcmd->sge_count = 1;
  704         dcmd->flags = htole16(MFI_FRAME_DIR_READ);
  705         dcmd->timeout = 0;
  706         dcmd->pad_0 = 0;
  707         dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
  708         dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
  709         dcmd->mbox.w[0] = htole32(seq_num);
  710         sc->last_seq_num = seq_num;
  711         dcmd->mbox.w[1] = htole32(curr_aen.word);
  712         dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
  713         dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));
  714 
  715         if (sc->aen_cmd != NULL) {
  716                 mrsas_release_mfi_cmd(cmd);
  717                 return 0;
  718         }
  719         /*
  720          * Store a reference to the cmd used to register for AEN. When an
  721          * application wants us to register for AEN, we have to abort this
  722          * cmd and re-register with a new EVENT LOCALE supplied by that app.
  723          */
  724         sc->aen_cmd = cmd;
  725 
  726         /*
  727          * Issue the aen registration frame
  728          */
  729         if (mrsas_issue_dcmd(sc, cmd)) {
  730                 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
  731                 return (1);
  732         }
  733         return 0;
  734 }
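
      /*
       * Illustrative example of the class/locale merge above (the values
       * are hypothetical and assume MR_EVT_CLASS_DEBUG is numerically
       * lower, i.e. more inclusive, than MR_EVT_CLASS_INFO): if the
       * pending AEN was registered with class = MR_EVT_CLASS_INFO and
       * locale = 0x0001, and the new request asks for class =
       * MR_EVT_CLASS_DEBUG and locale = 0x0002, the pending command is
       * aborted and the registration is re-issued with class =
       * MR_EVT_CLASS_DEBUG and locale = 0x0003 (the OR of the two
       * locales).
       */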
  735 
  736 /*
  737  * mrsas_start_aen:     Subscribes to AEN during driver load time
  738  * @instance:           Adapter soft state
  739  */
  740 static int
  741 mrsas_start_aen(struct mrsas_softc *sc)
  742 {
  743         struct mrsas_evt_log_info eli;
  744         union mrsas_evt_class_locale class_locale;
  745 
  746         /* Get the latest sequence number from FW */
  747 
  748         memset(&eli, 0, sizeof(eli));
  749 
  750         if (mrsas_get_seq_num(sc, &eli))
  751                 return -1;
  752 
  753         /* Register AEN with FW for latest sequence number plus 1 */
  754         class_locale.members.reserved = 0;
  755         class_locale.members.locale = MR_EVT_LOCALE_ALL;
  756         class_locale.members.class = MR_EVT_CLASS_DEBUG;
  757 
  758         return mrsas_register_aen(sc, eli.newest_seq_num + 1,
  759             class_locale.word);
  760 
  761 }
  762 
  763 /*
  764  * mrsas_setup_msix:    Set up MSI-X interrupt resources and handlers
  765  * @sc:                                 adapter soft state
  766  */
  767 static int
  768 mrsas_setup_msix(struct mrsas_softc *sc)
  769 {
  770         int i;
  771 
  772         for (i = 0; i < sc->msix_vectors; i++) {
  773                 sc->irq_context[i].sc = sc;
  774                 sc->irq_context[i].MSIxIndex = i;
  775                 sc->irq_id[i] = i + 1;
  776                 sc->mrsas_irq[i] = bus_alloc_resource_any
  777                     (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
  778                     ,RF_ACTIVE);
  779                 if (sc->mrsas_irq[i] == NULL) {
  780                         device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
  781                         goto irq_alloc_failed;
  782                 }
  783                 if (bus_setup_intr(sc->mrsas_dev,
  784                     sc->mrsas_irq[i],
  785                     INTR_MPSAFE | INTR_TYPE_CAM,
  786                     NULL, mrsas_isr, &sc->irq_context[i],
  787                     &sc->intr_handle[i])) {
  788                         device_printf(sc->mrsas_dev,
  789                             "Cannot set up MSI-x interrupt handler\n");
  790                         goto irq_alloc_failed;
  791                 }
  792         }
  793         return SUCCESS;
  794 
  795 irq_alloc_failed:
  796         mrsas_teardown_intr(sc);
  797         return (FAIL);
  798 }
  799 
  800 /*
  801  * mrsas_allocate_msix:         Allocate MSI-X vectors
  802  * @sc:                                         adapter soft state
  803  */
  804 static int
  805 mrsas_allocate_msix(struct mrsas_softc *sc)
  806 {
  807         if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
  808                 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
  809                     " of vectors\n", sc->msix_vectors);
  810         } else {
  811                 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
  812                 goto irq_alloc_failed;
  813         }
  814         return SUCCESS;
  815 
  816 irq_alloc_failed:
  817         mrsas_teardown_intr(sc);
  818         return (FAIL);
  819 }
  820 
  821 /*
  822  * mrsas_attach:        PCI entry point
  823  * input:                       pointer to device struct
  824  *
  825  * Performs setup of PCI and registers, initializes mutexes and linked lists,
  826  * registers interrupts and CAM, and initializes the adapter/controller to
  827  * its proper state.
  828  */
  829 static int
  830 mrsas_attach(device_t dev)
  831 {
  832         struct mrsas_softc *sc = device_get_softc(dev);
  833         uint32_t cmd, error;
  834 
  835         memset(sc, 0, sizeof(struct mrsas_softc));
  836 
  837         /* Look up our softc and initialize its fields. */
  838         sc->mrsas_dev = dev;
  839         sc->device_id = pci_get_device(dev);
  840 
  841         switch (sc->device_id) {
  842         case MRSAS_INVADER:
  843         case MRSAS_FURY:
  844         case MRSAS_INTRUDER:
  845         case MRSAS_INTRUDER_24:
  846         case MRSAS_CUTLASS_52:
  847         case MRSAS_CUTLASS_53:
  848                 sc->mrsas_gen3_ctrl = 1;
  849                 break;
  850         case MRSAS_VENTURA:
  851         case MRSAS_CRUSADER:
  852         case MRSAS_HARPOON:
  853         case MRSAS_TOMCAT:
  854         case MRSAS_VENTURA_4PORT:
  855         case MRSAS_CRUSADER_4PORT:
  856                 sc->is_ventura = true;
  857                 break;
  858         case MRSAS_AERO_10E1:
  859         case MRSAS_AERO_10E5:
  860                 device_printf(dev, "Adapter is in configurable secure mode\n");
  861         case MRSAS_AERO_10E2:
  862         case MRSAS_AERO_10E6:
  863                 sc->is_aero = true;
  864                 break;
  865         case MRSAS_AERO_10E0:
  866         case MRSAS_AERO_10E3:
  867         case MRSAS_AERO_10E4:
  868         case MRSAS_AERO_10E7:
  869                 device_printf(dev, "Adapter is in non-secure mode\n");
  870                 return SUCCESS;
  871         }
  872 
  873         mrsas_get_tunables(sc);
  874 
  875         /*
  876          * Set up PCI and registers
  877          */
  878         cmd = pci_read_config(dev, PCIR_COMMAND, 2);
  879         /* Force the busmaster enable bit on. */
  880         cmd |= PCIM_CMD_BUSMASTEREN;
  881         pci_write_config(dev, PCIR_COMMAND, cmd, 2);
  882 
  883         /* For Ventura/Aero, system registers are mapped to BAR0 */
  884         if (sc->is_ventura || sc->is_aero)
  885                 sc->reg_res_id = PCIR_BAR(0);   /* BAR0 offset */
  886         else
  887                 sc->reg_res_id = PCIR_BAR(1);   /* BAR1 offset */
  888 
  889         if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
  890             &(sc->reg_res_id), RF_ACTIVE))
  891             == NULL) {
  892                 device_printf(dev, "Cannot allocate PCI registers\n");
  893                 goto attach_fail;
  894         }
  895         sc->bus_tag = rman_get_bustag(sc->reg_res);
  896         sc->bus_handle = rman_get_bushandle(sc->reg_res);
  897 
  898         /* Initialize mutexes */
  899         mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
  900         mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
  901         mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
  902         mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
  903         mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
  904         mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
  905         mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
  906         mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
  907         mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);
  908 
  909         /* Initialize linked lists */
  910         TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
  911         TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
  912 
  913         mrsas_atomic_set(&sc->fw_outstanding, 0);
  914         mrsas_atomic_set(&sc->target_reset_outstanding, 0);
  915         mrsas_atomic_set(&sc->prp_count, 0);
  916         mrsas_atomic_set(&sc->sge_holes, 0);
  917 
  918         sc->io_cmds_highwater = 0;
  919 
  920         sc->adprecovery = MRSAS_HBA_OPERATIONAL;
  921         sc->UnevenSpanSupport = 0;
  922 
  923         sc->msix_enable = 0;
  924 
  925         /* Initialize Firmware */
  926         if (mrsas_init_fw(sc) != SUCCESS) {
  927                 goto attach_fail_fw;
  928         }
  929         /* Register mrsas to CAM layer */
  930         if ((mrsas_cam_attach(sc) != SUCCESS)) {
  931                 goto attach_fail_cam;
  932         }
  933         /* Register IRQs */
  934         if (mrsas_setup_irq(sc) != SUCCESS) {
  935                 goto attach_fail_irq;
  936         }
  937         error = mrsas_kproc_create(mrsas_ocr_thread, sc,
  938             &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
  939             device_get_unit(sc->mrsas_dev));
  940         if (error) {
  941                 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
  942                 goto attach_fail_ocr_thread;
  943         }
  944         /*
  945          * After FW initialization and OCR thread creation, we defer cdev
  946          * creation and AEN setup to the interrupt config hook (ICH) callback.
  947          */
  948         sc->mrsas_ich.ich_func = mrsas_ich_startup;
  949         sc->mrsas_ich.ich_arg = sc;
  950         if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
  951                 device_printf(sc->mrsas_dev, "Config hook is already established\n");
  952         }
  953         mrsas_setup_sysctl(sc);
  954         return SUCCESS;
  955 
  956 attach_fail_ocr_thread:
  957         if (sc->ocr_thread_active)
  958                 wakeup(&sc->ocr_chan);
  959 attach_fail_irq:
  960         mrsas_teardown_intr(sc);
  961 attach_fail_cam:
  962         mrsas_cam_detach(sc);
  963 attach_fail_fw:
  964         /* If MSI-X vectors were allocated and FW init failed, release MSI-X */
  965         if (sc->msix_enable == 1)
  966                 pci_release_msi(sc->mrsas_dev);
  967         mrsas_free_mem(sc);
  968         mtx_destroy(&sc->sim_lock);
  969         mtx_destroy(&sc->aen_lock);
  970         mtx_destroy(&sc->pci_lock);
  971         mtx_destroy(&sc->io_lock);
  972         mtx_destroy(&sc->ioctl_lock);
  973         mtx_destroy(&sc->mpt_cmd_pool_lock);
  974         mtx_destroy(&sc->mfi_cmd_pool_lock);
  975         mtx_destroy(&sc->raidmap_lock);
  976         mtx_destroy(&sc->stream_lock);
  977 attach_fail:
  978         if (sc->reg_res) {
  979                 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
  980                     sc->reg_res_id, sc->reg_res);
  981         }
  982         return (ENXIO);
  983 }
  984 
  985 /*
  986  * Interrupt config hook
  987  */
  988 static void
  989 mrsas_ich_startup(void *arg)
  990 {
  991         int i = 0;
  992         struct mrsas_softc *sc = (struct mrsas_softc *)arg;
  993 
  994         /*
  995          * Initialize a counting semaphore to limit the number of concurrent IOCTLs
  996          */
  997         sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
  998             IOCTL_SEMA_DESCRIPTION);
  999 
 1000         /* Create a /dev entry for mrsas controller. */
 1001         sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
 1002             GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
 1003             device_get_unit(sc->mrsas_dev));
 1004 
 1005         if (device_get_unit(sc->mrsas_dev) == 0) {
 1006                 make_dev_alias_p(MAKEDEV_CHECKNAME,
 1007                     &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
 1008                     "megaraid_sas_ioctl_node");
 1009         }
 1010         if (sc->mrsas_cdev)
 1011                 sc->mrsas_cdev->si_drv1 = sc;
 1012 
 1013         /*
 1014          * Add this controller to mrsas_mgmt_info structure so that it can be
 1015          * exported to management applications
 1016          */
 1017         if (device_get_unit(sc->mrsas_dev) == 0)
 1018                 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
 1019 
 1020         mrsas_mgmt_info.count++;
 1021         mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
 1022         mrsas_mgmt_info.max_index++;
 1023 
 1024         /* Enable Interrupts */
 1025         mrsas_enable_intr(sc);
 1026 
 1027         /* Call DCMD get_pd_info for all system PDs */
 1028         for (i = 0; i < MRSAS_MAX_PD; i++) {
 1029                 if ((sc->target_list[i].target_id != 0xffff) &&
 1030                         sc->pd_info_mem)
 1031                         mrsas_get_pd_info(sc, sc->target_list[i].target_id);
 1032         }
 1033 
 1034         /* Initiate AEN (Asynchronous Event Notification) */
 1035         if (mrsas_start_aen(sc)) {
 1036                 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
 1037                     "Further events from the controller will not be communicated.\n"
 1038                     "Either there is some problem in the controller "
 1039                     "or the controller does not support AEN.\n"
 1040                     "Please contact the support team if the problem persists.\n");
 1041         }
 1042         if (sc->mrsas_ich.ich_arg != NULL) {
 1043                 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
 1044                 config_intrhook_disestablish(&sc->mrsas_ich);
 1045                 sc->mrsas_ich.ich_arg = NULL;
 1046         }
 1047 }
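
      /*
       * The /dev/mrsas<unit> node created above (and the
       * "megaraid_sas_ioctl_node" alias on unit 0) is what management
       * applications open to reach the mrsas_cdevsw entry points; the
       * mrsas_mgmt_info table populated here is intended to let those
       * IOCTLs be mapped back to the correct adapter instance.
       */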
 1048 
 1049 /*
 1050  * mrsas_detach:        De-allocates and tears down resources
 1051  * input:                       pointer to device struct
 1052  *
 1053  * This function is the entry point for device disconnect and detach.
 1054  * It performs memory de-allocation, shuts down the controller, and tears
 1055  * down and destroys the remaining resources.
 1056  */
 1057 static int
 1058 mrsas_detach(device_t dev)
 1059 {
 1060         struct mrsas_softc *sc;
 1061         int i = 0;
 1062 
 1063         sc = device_get_softc(dev);
 1064         sc->remove_in_progress = 1;
 1065 
 1066         /* Destroy the character device so no other IOCTL will be handled */
 1067         if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
 1068                 destroy_dev(sc->mrsas_linux_emulator_cdev);
 1069         destroy_dev(sc->mrsas_cdev);
 1070 
 1071         /*
 1072          * Take the instance off the instance array. Note that we will not
 1073          * decrement the max_index. We let this array be a sparse array.
 1074          */
 1075         for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
 1076                 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
 1077                         mrsas_mgmt_info.count--;
 1078                         mrsas_mgmt_info.sc_ptr[i] = NULL;
 1079                         break;
 1080                 }
 1081         }
 1082 
 1083         if (sc->ocr_thread_active)
 1084                 wakeup(&sc->ocr_chan);
 1085         while (sc->reset_in_progress) {
 1086                 i++;
 1087                 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
 1088                         mrsas_dprint(sc, MRSAS_INFO,
 1089                             "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
 1090                 }
 1091                 pause("mr_shutdown", hz);
 1092         }
 1093         i = 0;
 1094         while (sc->ocr_thread_active) {
 1095                 i++;
 1096                 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
 1097                         mrsas_dprint(sc, MRSAS_INFO,
 1098                             "[%2d]waiting for "
 1099                             "mrsas_ocr thread to quit ocr %d\n", i,
 1100                             sc->ocr_thread_active);
 1101                 }
 1102                 pause("mr_shutdown", hz);
 1103         }
 1104         mrsas_flush_cache(sc);
 1105         mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
 1106         mrsas_disable_intr(sc);
 1107 
 1108         if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
 1109                 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
 1110                         free(sc->streamDetectByLD[i], M_MRSAS);
 1111                 free(sc->streamDetectByLD, M_MRSAS);
 1112                 sc->streamDetectByLD = NULL;
 1113         }
 1114 
 1115         mrsas_cam_detach(sc);
 1116         mrsas_teardown_intr(sc);
 1117         mrsas_free_mem(sc);
 1118         mtx_destroy(&sc->sim_lock);
 1119         mtx_destroy(&sc->aen_lock);
 1120         mtx_destroy(&sc->pci_lock);
 1121         mtx_destroy(&sc->io_lock);
 1122         mtx_destroy(&sc->ioctl_lock);
 1123         mtx_destroy(&sc->mpt_cmd_pool_lock);
 1124         mtx_destroy(&sc->mfi_cmd_pool_lock);
 1125         mtx_destroy(&sc->raidmap_lock);
 1126         mtx_destroy(&sc->stream_lock);
 1127 
 1128         /* Wait for all outstanding IOCTLs to release the counting semaphore */
 1129         while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
 1130                 pause("mr_shutdown", hz);
 1131 
 1132         /* Destroy the counting semaphore created for Ioctl */
 1133         sema_destroy(&sc->ioctl_count_sema);
 1134 
 1135         if (sc->reg_res) {
 1136                 bus_release_resource(sc->mrsas_dev,
 1137                     SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
 1138         }
 1139         if (sc->sysctl_tree != NULL)
 1140                 sysctl_ctx_free(&sc->sysctl_ctx);
 1141 
 1142         return (0);
 1143 }
 1144 
 1145 static int
 1146 mrsas_shutdown(device_t dev)
 1147 {
 1148         struct mrsas_softc *sc;
 1149         int i;
 1150 
 1151         sc = device_get_softc(dev);
 1152         sc->remove_in_progress = 1;
 1153         if (!KERNEL_PANICKED()) {
 1154                 if (sc->ocr_thread_active)
 1155                         wakeup(&sc->ocr_chan);
 1156                 i = 0;
 1157                 while (sc->reset_in_progress && i < 15) {
 1158                         i++;
 1159                         if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
 1160                                 mrsas_dprint(sc, MRSAS_INFO,
 1161                                     "[%2d]waiting for OCR to be finished "
 1162                                     "from %s\n", i, __func__);
 1163                         }
 1164                         pause("mr_shutdown", hz);
 1165                 }
 1166                 if (sc->reset_in_progress) {
 1167                         mrsas_dprint(sc, MRSAS_INFO,
 1168                             "gave up waiting for OCR to be finished\n");
 1169                         return (0);
 1170                 }
 1171         }
 1172 
 1173         mrsas_flush_cache(sc);
 1174         mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
 1175         mrsas_disable_intr(sc);
 1176         return (0);
 1177 }
 1178 
 1179 /*
 1180  * mrsas_free_mem:              Frees allocated memory
 1181  * input:                               Adapter instance soft state
 1182  *
 1183  * This function is called from mrsas_detach() to free previously allocated
 1184  * memory.
 1185  */
 1186 void
 1187 mrsas_free_mem(struct mrsas_softc *sc)
 1188 {
 1189         int i;
 1190         u_int32_t max_fw_cmds;
 1191         struct mrsas_mfi_cmd *mfi_cmd;
 1192         struct mrsas_mpt_cmd *mpt_cmd;
 1193 
 1194         /*
 1195          * Free RAID map memory
 1196          */
 1197         for (i = 0; i < 2; i++) {
 1198                 if (sc->raidmap_phys_addr[i])
 1199                         bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
 1200                 if (sc->raidmap_mem[i] != NULL)
 1201                         bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
 1202                 if (sc->raidmap_tag[i] != NULL)
 1203                         bus_dma_tag_destroy(sc->raidmap_tag[i]);
 1204 
 1205                 if (sc->ld_drv_map[i] != NULL)
 1206                         free(sc->ld_drv_map[i], M_MRSAS);
 1207         }
 1208         for (i = 0; i < 2; i++) {
 1209                 if (sc->jbodmap_phys_addr[i])
 1210                         bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
 1211                 if (sc->jbodmap_mem[i] != NULL)
 1212                         bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
 1213                 if (sc->jbodmap_tag[i] != NULL)
 1214                         bus_dma_tag_destroy(sc->jbodmap_tag[i]);
 1215         }
 1216         /*
 1217          * Free version buffer memory
 1218          */
 1219         if (sc->verbuf_phys_addr)
 1220                 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
 1221         if (sc->verbuf_mem != NULL)
 1222                 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
 1223         if (sc->verbuf_tag != NULL)
 1224                 bus_dma_tag_destroy(sc->verbuf_tag);
 1225 
 1226         /*
 1227          * Free sense buffer memory
 1228          */
 1229         if (sc->sense_phys_addr)
 1230                 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
 1231         if (sc->sense_mem != NULL)
 1232                 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
 1233         if (sc->sense_tag != NULL)
 1234                 bus_dma_tag_destroy(sc->sense_tag);
 1235 
 1236         /*
 1237          * Free chain frame memory
 1238          */
 1239         if (sc->chain_frame_phys_addr)
 1240                 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
 1241         if (sc->chain_frame_mem != NULL)
 1242                 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
 1243         if (sc->chain_frame_tag != NULL)
 1244                 bus_dma_tag_destroy(sc->chain_frame_tag);
 1245 
 1246         /*
 1247          * Free IO Request memory
 1248          */
 1249         if (sc->io_request_phys_addr)
 1250                 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
 1251         if (sc->io_request_mem != NULL)
 1252                 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
 1253         if (sc->io_request_tag != NULL)
 1254                 bus_dma_tag_destroy(sc->io_request_tag);
 1255 
 1256         /*
 1257          * Free Reply Descriptor memory
 1258          */
 1259         if (sc->reply_desc_phys_addr)
 1260                 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
 1261         if (sc->reply_desc_mem != NULL)
 1262                 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
 1263         if (sc->reply_desc_tag != NULL)
 1264                 bus_dma_tag_destroy(sc->reply_desc_tag);
 1265 
 1266         /*
 1267          * Free event detail memory
 1268          */
 1269         if (sc->evt_detail_phys_addr)
 1270                 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
 1271         if (sc->evt_detail_mem != NULL)
 1272                 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
 1273         if (sc->evt_detail_tag != NULL)
 1274                 bus_dma_tag_destroy(sc->evt_detail_tag);
 1275 
 1276         /*
 1277          * Free PD info memory
 1278          */
 1279         if (sc->pd_info_phys_addr)
 1280                 bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
 1281         if (sc->pd_info_mem != NULL)
 1282                 bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
 1283         if (sc->pd_info_tag != NULL)
 1284                 bus_dma_tag_destroy(sc->pd_info_tag);
 1285 
 1286         /*
 1287          * Free MFI frames
 1288          */
 1289         if (sc->mfi_cmd_list) {
 1290                 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
 1291                         mfi_cmd = sc->mfi_cmd_list[i];
 1292                         mrsas_free_frame(sc, mfi_cmd);
 1293                 }
 1294         }
 1295         if (sc->mficmd_frame_tag != NULL)
 1296                 bus_dma_tag_destroy(sc->mficmd_frame_tag);
 1297 
 1298         /*
 1299          * Free MPT internal command list
 1300          */
 1301         max_fw_cmds = sc->max_fw_cmds;
 1302         if (sc->mpt_cmd_list) {
 1303                 for (i = 0; i < max_fw_cmds; i++) {
 1304                         mpt_cmd = sc->mpt_cmd_list[i];
 1305                         bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
 1306                         free(sc->mpt_cmd_list[i], M_MRSAS);
 1307                 }
 1308                 free(sc->mpt_cmd_list, M_MRSAS);
 1309                 sc->mpt_cmd_list = NULL;
 1310         }
 1311         /*
 1312          * Free MFI internal command list
 1313          */
 1314 
 1315         if (sc->mfi_cmd_list) {
 1316                 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
 1317                         free(sc->mfi_cmd_list[i], M_MRSAS);
 1318                 }
 1319                 free(sc->mfi_cmd_list, M_MRSAS);
 1320                 sc->mfi_cmd_list = NULL;
 1321         }
 1322         /*
 1323          * Free request descriptor memory
 1324          */
 1325         free(sc->req_desc, M_MRSAS);
 1326         sc->req_desc = NULL;
 1327 
 1328         /*
 1329          * Destroy parent tag
 1330          */
 1331         if (sc->mrsas_parent_tag != NULL)
 1332                 bus_dma_tag_destroy(sc->mrsas_parent_tag);
 1333 
 1334         /*
 1335          * Free ctrl_info memory
 1336          */
 1337         if (sc->ctrl_info != NULL)
 1338                 free(sc->ctrl_info, M_MRSAS);
 1339 }
 1340 
 1341 /*
 1342  * mrsas_teardown_intr: Teardown interrupt
 1343  * input:                               Adapter instance soft state
 1344  *
 1345  * This function is called from mrsas_detach() to tear down and release the
 1346  * bus interrupt resources.
 1347  */
 1348 void
 1349 mrsas_teardown_intr(struct mrsas_softc *sc)
 1350 {
 1351         int i;
 1352 
 1353         if (!sc->msix_enable) {
 1354                 if (sc->intr_handle[0])
 1355                         bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
 1356                 if (sc->mrsas_irq[0] != NULL)
 1357                         bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
 1358                             sc->irq_id[0], sc->mrsas_irq[0]);
 1359                 sc->intr_handle[0] = NULL;
 1360         } else {
 1361                 for (i = 0; i < sc->msix_vectors; i++) {
 1362                         if (sc->intr_handle[i])
 1363                                 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
 1364                                     sc->intr_handle[i]);
 1365 
 1366                         if (sc->mrsas_irq[i] != NULL)
 1367                                 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
 1368                                     sc->irq_id[i], sc->mrsas_irq[i]);
 1369 
 1370                         sc->intr_handle[i] = NULL;
 1371                 }
 1372                 pci_release_msi(sc->mrsas_dev);
 1373         }
 1374 
 1375 }
 1376 
 1377 /*
 1378  * mrsas_suspend:       Suspend entry point
 1379  * input:                       Device struct pointer
 1380  *
 1381  * This function is the entry point for system suspend from the OS.
 1382  */
 1383 static int
 1384 mrsas_suspend(device_t dev)
 1385 {
 1386         /* This will be filled when the driver will have hibernation support */
 1387         return (0);
 1388 }
 1389 
 1390 /*
 1391  * mrsas_resume:        Resume entry point
 1392  * input:                       Device struct pointer
 1393  *
 1394  * This function is the entry point for system resume from the OS.
 1395  */
 1396 static int
 1397 mrsas_resume(device_t dev)
 1398 {
 1399         /* This will be filled when the driver will have hibernation support */
 1400         return (0);
 1401 }
 1402 
 1403 /**
 1404  * mrsas_get_softc_instance:    Find softc instance based on cmd type
 1405  *
 1406  * This function returns the softc instance based on the cmd type.
 1407  * In some cases the application fires the ioctl on the required management
 1408  * instance and does not provide a host_no; use cdev->si_drv1 to get the
 1409  * softc instance in those cases, else get the softc instance from the
 1410  * host_no provided by the application in the user data.
 1411  */
 1412 
 1413 static struct mrsas_softc *
 1414 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
 1415 {
 1416         struct mrsas_softc *sc = NULL;
 1417         struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
 1418 
 1419         if (cmd == MRSAS_IOC_GET_PCI_INFO) {
 1420                 sc = dev->si_drv1;
 1421         } else {
 1422                 /*
 1423                  * get the Host number & the softc from data sent by the
 1424                  * Application
 1425                  */
 1426                 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
 1427                 if (sc == NULL)
 1428                         printf("There is no Controller number %d\n",
 1429                             user_ioc->host_no);
 1430                 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
 1431                         mrsas_dprint(sc, MRSAS_FAULT,
 1432                             "Invalid Controller number %d\n", user_ioc->host_no);
 1433         }
 1434 
 1435         return sc;
 1436 }
 1437 
 1438 /*
 1439  * mrsas_ioctl: IOCtl commands entry point.
 1440  *
 1441  * This function is the entry point for IOCtls from the OS.  It calls the
 1442  * appropriate function for processing depending on the command received.
 1443  */
 1444 static int
 1445 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
 1446     struct thread *td)
 1447 {
 1448         struct mrsas_softc *sc;
 1449         int ret = 0, i = 0;
 1450         MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
 1451 
 1452         sc = mrsas_get_softc_instance(dev, cmd, arg);
 1453         if (!sc)
 1454                 return ENOENT;
 1455 
 1456         if (sc->remove_in_progress ||
 1457                 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
 1458                 mrsas_dprint(sc, MRSAS_INFO,
 1459                     "Either driver remove or shutdown called or "
 1460                         "HW is in unrecoverable critical error state.\n");
 1461                 return ENOENT;
 1462         }
 1463         mtx_lock_spin(&sc->ioctl_lock);
 1464         if (!sc->reset_in_progress) {
 1465                 mtx_unlock_spin(&sc->ioctl_lock);
 1466                 goto do_ioctl;
 1467         }
 1468         mtx_unlock_spin(&sc->ioctl_lock);
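              /*
               * An online controller reset (OCR) is in progress: block the
               * ioctl here, sleeping one second per iteration and logging a
               * notice every MRSAS_RESET_NOTICE_INTERVAL iterations, until
               * the reset completes.
               */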
 1469         while (sc->reset_in_progress) {
 1470                 i++;
 1471                 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
 1472                         mrsas_dprint(sc, MRSAS_INFO,
 1473                             "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
 1474                 }
 1475                 pause("mr_ioctl", hz);
 1476         }
 1477 
 1478 do_ioctl:
 1479         switch (cmd) {
 1480         case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
 1481 #ifdef COMPAT_FREEBSD32
 1482         case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
 1483 #endif
 1484                 /*
 1485                  * Decrement the Ioctl counting Semaphore before getting an
 1486                  * mfi command
 1487                  */
 1488                 sema_wait(&sc->ioctl_count_sema);
 1489 
 1490                 ret = mrsas_passthru(sc, (void *)arg, cmd);
 1491 
 1492                 /* Increment the Ioctl counting semaphore value */
 1493                 sema_post(&sc->ioctl_count_sema);
 1494 
 1495                 break;
 1496         case MRSAS_IOC_SCAN_BUS:
 1497                 ret = mrsas_bus_scan(sc);
 1498                 break;
 1499 
 1500         case MRSAS_IOC_GET_PCI_INFO:
 1501                 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
 1502                 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
 1503                 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
 1504                 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
 1505                 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
 1506                 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
 1507                 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d, "
 1508                     "pci device no: %d, pci function no: %d, "
 1509                     "pci domain ID: %d\n",
 1510                     pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
 1511                     pciDrvInfo->functionNumber, pciDrvInfo->domainID);
 1512                 ret = 0;
 1513                 break;
 1514 
 1515         default:
 1516                 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
 1517                 ret = ENOENT;
 1518         }
 1519 
 1520         return (ret);
 1521 }
 1522 
 1523 /*
 1524  * mrsas_poll:  poll entry point for mrsas driver fd
 1525  *
 1526  * This function is the entry point for poll from the OS.  It waits for some AEN
 1527  * events to be triggered from the controller and notifies back.
 1528  */
 1529 static int
 1530 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
 1531 {
 1532         struct mrsas_softc *sc;
 1533         int revents = 0;
 1534 
 1535         sc = dev->si_drv1;
 1536 
 1537         if (poll_events & (POLLIN | POLLRDNORM)) {
 1538                 if (sc->mrsas_aen_triggered) {
 1539                         revents |= poll_events & (POLLIN | POLLRDNORM);
 1540                 }
 1541         }
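              /*
               * No AEN is pending yet: record this thread in the driver's
               * selinfo so that a later selwakeup() on mrsas_select can
               * notify the poller once an event arrives.
               */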
 1542         if (revents == 0) {
 1543                 if (poll_events & (POLLIN | POLLRDNORM)) {
 1544                         mtx_lock(&sc->aen_lock);
 1545                         sc->mrsas_poll_waiting = 1;
 1546                         selrecord(td, &sc->mrsas_select);
 1547                         mtx_unlock(&sc->aen_lock);
 1548                 }
 1549         }
 1550         return revents;
 1551 }
 1552 
 1553 /*
 1554  * mrsas_setup_irq:     Set up interrupt
 1555  * input:                       Adapter instance soft state
 1556  *
 1557  * This function sets up the interrupt as a bus resource, with flags
 1558  * indicating that the resource is shareable (RF_SHAREABLE) and activated
 1559  * at allocation time (RF_ACTIVE).
 1560  */
 1561 static int
 1562 mrsas_setup_irq(struct mrsas_softc *sc)
 1563 {
 1564         if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
 1565                 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
 1566 
 1567         else {
 1568                 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
 1569                 sc->irq_context[0].sc = sc;
 1570                 sc->irq_context[0].MSIxIndex = 0;
 1571                 sc->irq_id[0] = 0;
 1572                 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
 1573                     SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
 1574                 if (sc->mrsas_irq[0] == NULL) {
 1575                         device_printf(sc->mrsas_dev, "Cannot allocate legacy "
 1576                             "interrupt\n");
 1577                         return (FAIL);
 1578                 }
 1579                 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
 1580                     INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
 1581                     &sc->irq_context[0], &sc->intr_handle[0])) {
 1582                         device_printf(sc->mrsas_dev, "Cannot set up legacy "
 1583                             "interrupt\n");
 1584                         return (FAIL);
 1585                 }
 1586         }
 1587         return (0);
 1588 }
 1589 
 1590 /*
 1591  * mrsas_isr:   ISR entry point
 1592  * input:               argument pointer
 1593  *
 1594  * This function is the interrupt service routine entry point.  There are two
 1595  * types of interrupts, state change interrupt and response interrupt.  If an
 1596  * interrupt is not ours, we just return.
 1597  */
 1598 void
 1599 mrsas_isr(void *arg)
 1600 {
 1601         struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
 1602         struct mrsas_softc *sc = irq_context->sc;
 1603         int status = 0;
 1604 
 1605         if (sc->mask_interrupts)
 1606                 return;
 1607 
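              /*
               * With a legacy (INTx) interrupt the line may be shared, so
               * mrsas_clear_intr() is consulted first to see whether the
               * interrupt is actually ours; MSI-X vectors skip this check.
               */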
 1608         if (!sc->msix_vectors) {
 1609                 status = mrsas_clear_intr(sc);
 1610                 if (!status)
 1611                         return;
 1612         }
 1613         /* If we are resetting, bail */
 1614         if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
 1615                 printf("Entered ISR while OCR is in progress.\n");
 1616                 mrsas_clear_intr(sc);
 1617                 return;
 1618         }
 1619         /* Process for reply request and clear response interrupt */
 1620         if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
 1621                 mrsas_clear_intr(sc);
 1622 
 1623         return;
 1624 }
 1625 
 1626 /*
 1627  * mrsas_complete_cmd:  Process reply request
 1628  * input:                               Adapter instance soft state
 1629  *
 1630  * This function is called from mrsas_isr() to process reply request and clear
 1631  * response interrupt. Processing of the reply request entails walking
 1632  * through the reply descriptor array for the command requests pending with
 1633  * the firmware.  We look at the Function field to determine the command type and
 1634  * perform the appropriate action.  Before we return, we clear the response
 1635  * interrupt.
 1636  */
 1637 int
 1638 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
 1639 {
 1640         Mpi2ReplyDescriptorsUnion_t *desc;
 1641         MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
 1642         MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
 1643         struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
 1644         struct mrsas_mfi_cmd *cmd_mfi;
 1645         u_int8_t reply_descript_type, *sense;
 1646         u_int16_t smid, num_completed;
 1647         u_int8_t status, extStatus;
 1648         union desc_value desc_val;
 1649         PLD_LOAD_BALANCE_INFO lbinfo;
 1650         u_int32_t device_id, data_length;
 1651         int threshold_reply_count = 0;
 1652 #if TM_DEBUG
 1653         MR_TASK_MANAGE_REQUEST *mr_tm_req;
 1654         MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
 1655 #endif
 1656 
 1657         /* If we have a hardware error, there is no need to continue */
 1658         if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
 1659                 return (DONE);
 1660 
 1661         desc = sc->reply_desc_mem;
 1662         desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
 1663             + sc->last_reply_idx[MSIxIndex];
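              /*
               * reply_desc_mem holds one descriptor queue per MSI-x vector,
               * each reply_alloc_sz bytes long.  The division converts this
               * vector's byte offset into a descriptor index, and
               * last_reply_idx[] is the consumer index within that queue.
               */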
 1664 
 1665         reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
 1666 
 1667         desc_val.word = desc->Words;
 1668         num_completed = 0;
 1669 
 1670         reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 1671 
 1672         /* Find our reply descriptor for the command and process */
 1673         while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
 1674                 smid = le16toh(reply_desc->SMID);
 1675                 cmd_mpt = sc->mpt_cmd_list[smid - 1];
 1676                 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
 1677 
 1678                 status = scsi_io_req->RaidContext.raid_context.status;
 1679                 extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
 1680                 sense = cmd_mpt->sense;
 1681                 data_length = scsi_io_req->DataLength;
 1682 
 1683                 switch (scsi_io_req->Function) {
 1684                 case MPI2_FUNCTION_SCSI_TASK_MGMT:
 1685 #if TM_DEBUG
 1686                         mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
 1687                         mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
 1688                             &mr_tm_req->TmRequest;
 1689                         device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
 1690                             "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
 1691 #endif
 1692                         wakeup_one((void *)&sc->ocr_chan);
 1693                         break;
 1694                 case MPI2_FUNCTION_SCSI_IO_REQUEST:     /* Fast Path IO. */
 1695                         device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
 1696                         lbinfo = &sc->load_balance_info[device_id];
 1697                         /* R1 load balancing for READ */
 1698                         if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
 1699                                 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
 1700                                 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
 1701                         }
 1702                         /* Fall thru and complete IO */
 1703                 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
 1704                         if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
 1705                                 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
 1706                                     extStatus, le32toh(data_length), sense);
 1707                                 mrsas_cmd_done(sc, cmd_mpt);
 1708                                 mrsas_atomic_dec(&sc->fw_outstanding);
 1709                         } else {
 1710                                 /*
 1711                                  * If the peer Raid 1/10 fast path failed,
 1712                                  * mark IO as failed to the scsi layer.
 1713                                  * Overwrite the current status by the failed status
 1714                                  * and make sure that if any command fails,
 1715                                  * driver returns fail status to CAM.
 1716                                  */
 1717                                 cmd_mpt->cmd_completed = 1;
 1718                                 r1_cmd = cmd_mpt->peer_cmd;
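                                      /*
                                       * Both halves of a RAID 1/10 fast-path
                                       * command must complete; only the second
                                       * completion to arrive (peer already
                                       * completed) finishes the original CCB.
                                       */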
 1719                                 if (r1_cmd->cmd_completed) {
 1720                                         if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
 1721                                                 status = r1_cmd->io_request->RaidContext.raid_context.status;
 1722                                                 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
 1723                                                 data_length = r1_cmd->io_request->DataLength;
 1724                                                 sense = r1_cmd->sense;
 1725                                         }
 1726                                         r1_cmd->ccb_ptr = NULL;
 1727                                         if (r1_cmd->callout_owner) {
 1728                                                 callout_stop(&r1_cmd->cm_callout);
 1729                                                 r1_cmd->callout_owner  = false;
 1730                                         }
 1731                                         mrsas_release_mpt_cmd(r1_cmd);
 1732                                         mrsas_atomic_dec(&sc->fw_outstanding);
 1733                                         mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
 1734                                             extStatus, le32toh(data_length), sense);
 1735                                         mrsas_cmd_done(sc, cmd_mpt);
 1736                                         mrsas_atomic_dec(&sc->fw_outstanding);
 1737                                 }
 1738                         }
 1739                         break;
 1740                 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:   /* MFI command */
 1741                         cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
 1742                         /*
 1743                          * Make sure NOT TO release the mfi command from the called
 1744                          * function's context if it is fired with issue_polled call.
 1745                          * And also make sure that the issue_polled call should only be
 1746                          * used if INTERRUPT IS DISABLED.
 1747                          */
 1748                         if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
 1749                                 mrsas_release_mfi_cmd(cmd_mfi);
 1750                         else
 1751                                 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
 1752                         break;
 1753                 }
 1754 
 1755                 sc->last_reply_idx[MSIxIndex]++;
 1756                 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
 1757                         sc->last_reply_idx[MSIxIndex] = 0;
 1758 
 1759                 desc->Words = ~((uint64_t)0x00);        /* set it back to all
 1760                                                          * 0xFFFFFFFFs */
 1761                 num_completed++;
 1762                 threshold_reply_count++;
 1763 
 1764                 /* Get the next reply descriptor */
 1765                 if (!sc->last_reply_idx[MSIxIndex]) {
 1766                         desc = sc->reply_desc_mem;
 1767                         desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
 1768                 } else
 1769                         desc++;
 1770 
 1771                 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
 1772                 desc_val.word = desc->Words;
 1773 
 1774                 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 1775 
 1776                 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 1777                         break;
 1778 
 1779                 /*
 1780                  * Write to reply post index after completing threshold reply
 1781                  * count and still there are more replies in reply queue
 1782                  * pending to be completed.
 1783                  */
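                      /*
                       * With msix_combined, each reply post host index
                       * register serves a group of eight queues: the queue
                       * number within the group goes in bits 26:24 and the
                       * consumer index in the low bits, e.g. MSIxIndex = 10
                       * writes msix_reg_offset[1] with
                       * (2 << 24) | last_reply_idx[10].  Otherwise a single
                       * register takes the full MSI-x index shifted into
                       * bit 24 and above.
                       */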
 1784                 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
 1785                         if (sc->msix_enable) {
 1786                                 if (sc->msix_combined)
 1787                                         mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
 1788                                             ((MSIxIndex & 0x7) << 24) |
 1789                                             sc->last_reply_idx[MSIxIndex]);
 1790                                 else
 1791                                         mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
 1792                                             sc->last_reply_idx[MSIxIndex]);
 1793                         } else
 1794                                 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
 1795                                     reply_post_host_index), sc->last_reply_idx[0]);
 1796 
 1797                         threshold_reply_count = 0;
 1798                 }
 1799         }
 1800 
 1801         /* No match, just return */
 1802         if (num_completed == 0)
 1803                 return (DONE);
 1804 
 1805         /* Clear response interrupt */
 1806         if (sc->msix_enable) {
 1807                 if (sc->msix_combined) {
 1808                         mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
 1809                             ((MSIxIndex & 0x7) << 24) |
 1810                             sc->last_reply_idx[MSIxIndex]);
 1811                 } else
 1812                         mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
 1813                             sc->last_reply_idx[MSIxIndex]);
 1814         } else
 1815                 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
 1816                     reply_post_host_index), sc->last_reply_idx[0]);
 1817 
 1818         return (0);
 1819 }
 1820 
 1821 /*
 1822  * mrsas_map_mpt_cmd_status:    Map firmware command status to CAM status
 1823  * input:                                               MPT command, CCB pointer, status, extStatus, data length, sense
 1824  *
 1825  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 1826  * It checks the command status and maps the appropriate CAM status for the
 1827  * CCB.
 1828  */
 1829 void
 1830 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
 1831     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
 1832 {
 1833         struct mrsas_softc *sc = cmd->sc;
 1834         u_int8_t *sense_data;
 1835 
 1836         switch (status) {
 1837         case MFI_STAT_OK:
 1838                 ccb_ptr->ccb_h.status = CAM_REQ_CMP;
 1839                 break;
 1840         case MFI_STAT_SCSI_IO_FAILED:
 1841         case MFI_STAT_SCSI_DONE_WITH_ERROR:
 1842                 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
 1843                 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
 1844                 if (sense_data) {
 1845                         /* For now just copy 18 bytes back */
 1846                         memcpy(sense_data, sense, 18);
 1847                         ccb_ptr->csio.sense_len = 18;
 1848                         ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
 1849                 }
 1850                 break;
 1851         case MFI_STAT_LD_OFFLINE:
 1852         case MFI_STAT_DEVICE_NOT_FOUND:
 1853                 if (ccb_ptr->ccb_h.target_lun)
 1854                         ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
 1855                 else
 1856                         ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
 1857                 break;
 1858         case MFI_STAT_CONFIG_SEQ_MISMATCH:
 1859                 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
 1860                 break;
 1861         default:
 1862                 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
 1863                 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
 1864                 ccb_ptr->csio.scsi_status = status;
 1865         }
 1866         return;
 1867 }
 1868 
 1869 /*
 1870  * mrsas_alloc_mem:     Allocate DMAable memory
 1871  * input:                       Adapter instance soft state
 1872  *
 1873  * This function creates the parent DMA tag and allocates DMAable memory. DMA
 1874  * tag describes constraints of DMA mapping. Memory allocated is mapped into
 1875  * Kernel virtual address. Callback argument is physical memory address.
 1876  */
 1877 static int
 1878 mrsas_alloc_mem(struct mrsas_softc *sc)
 1879 {
 1880         u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
 1881                 evt_detail_size, count, pd_info_size;
 1882 
 1883         /*
 1884          * Allocate parent DMA tag
 1885          */
 1886         if (bus_dma_tag_create(
 1887             bus_get_dma_tag(sc->mrsas_dev),     /* parent */
 1888             1,                          /* alignment */
 1889             0,                          /* boundary */
 1890             BUS_SPACE_MAXADDR,          /* lowaddr */
 1891             BUS_SPACE_MAXADDR,          /* highaddr */
 1892             NULL, NULL,                 /* filter, filterarg */
 1893             BUS_SPACE_MAXSIZE,          /* maxsize */
 1894             BUS_SPACE_UNRESTRICTED,     /* nsegments */
 1895             BUS_SPACE_MAXSIZE,          /* maxsegsize */
 1896             0,                          /* flags */
 1897             NULL, NULL,                 /* lockfunc, lockarg */
 1898             &sc->mrsas_parent_tag       /* tag */
 1899             )) {
 1900                 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
 1901                 return (ENOMEM);
 1902         }
 1903         /*
 1904          * Allocate for version buffer
 1905          */
 1906         verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
 1907         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 1908             1, 0,
 1909             BUS_SPACE_MAXADDR_32BIT,
 1910             BUS_SPACE_MAXADDR,
 1911             NULL, NULL,
 1912             verbuf_size,
 1913             1,
 1914             verbuf_size,
 1915             BUS_DMA_ALLOCNOW,
 1916             NULL, NULL,
 1917             &sc->verbuf_tag)) {
 1918                 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
 1919                 return (ENOMEM);
 1920         }
 1921         if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
 1922             BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
 1923                 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
 1924                 return (ENOMEM);
 1925         }
 1926         bzero(sc->verbuf_mem, verbuf_size);
 1927         if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
 1928             verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
 1929             BUS_DMA_NOWAIT)) {
 1930                 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
 1931                 return (ENOMEM);
 1932         }
 1933         /*
 1934          * Allocate IO Request Frames
 1935          */
 1936         io_req_size = sc->io_frames_alloc_sz;
 1937         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 1938             16, 0,
 1939             BUS_SPACE_MAXADDR_32BIT,
 1940             BUS_SPACE_MAXADDR,
 1941             NULL, NULL,
 1942             io_req_size,
 1943             1,
 1944             io_req_size,
 1945             BUS_DMA_ALLOCNOW,
 1946             NULL, NULL,
 1947             &sc->io_request_tag)) {
 1948                 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
 1949                 return (ENOMEM);
 1950         }
 1951         if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
 1952             BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
 1953                 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
 1954                 return (ENOMEM);
 1955         }
 1956         bzero(sc->io_request_mem, io_req_size);
 1957         if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
 1958             sc->io_request_mem, io_req_size, mrsas_addr_cb,
 1959             &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
 1960                 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
 1961                 return (ENOMEM);
 1962         }
 1963         /*
 1964          * Allocate Chain Frames
 1965          */
 1966         chain_frame_size = sc->chain_frames_alloc_sz;
 1967         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 1968             4, 0,
 1969             BUS_SPACE_MAXADDR_32BIT,
 1970             BUS_SPACE_MAXADDR,
 1971             NULL, NULL,
 1972             chain_frame_size,
 1973             1,
 1974             chain_frame_size,
 1975             BUS_DMA_ALLOCNOW,
 1976             NULL, NULL,
 1977             &sc->chain_frame_tag)) {
 1978                 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
 1979                 return (ENOMEM);
 1980         }
 1981         if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
 1982             BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
 1983                 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
 1984                 return (ENOMEM);
 1985         }
 1986         bzero(sc->chain_frame_mem, chain_frame_size);
 1987         if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
 1988             sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
 1989             &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
 1990                 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
 1991                 return (ENOMEM);
 1992         }
 1993         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 1994         /*
 1995          * Allocate Reply Descriptor Array
 1996          */
 1997         reply_desc_size = sc->reply_alloc_sz * count;
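              /*
               * One descriptor queue of reply_alloc_sz bytes is laid out per
               * MSI-x vector (a single queue with a legacy interrupt), back
               * to back in this one allocation, matching the indexing done
               * in mrsas_complete_cmd().
               */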
 1998         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 1999             16, 0,
 2000             BUS_SPACE_MAXADDR_32BIT,
 2001             BUS_SPACE_MAXADDR,
 2002             NULL, NULL,
 2003             reply_desc_size,
 2004             1,
 2005             reply_desc_size,
 2006             BUS_DMA_ALLOCNOW,
 2007             NULL, NULL,
 2008             &sc->reply_desc_tag)) {
 2009                 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
 2010                 return (ENOMEM);
 2011         }
 2012         if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
 2013             BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
 2014                 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
 2015                 return (ENOMEM);
 2016         }
 2017         if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
 2018             sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
 2019             &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
 2020                 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
 2021                 return (ENOMEM);
 2022         }
 2023         /*
 2024          * Allocate Sense Buffer Array.  Keep in lower 4GB
 2025          */
 2026         sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
 2027         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2028             64, 0,
 2029             BUS_SPACE_MAXADDR_32BIT,
 2030             BUS_SPACE_MAXADDR,
 2031             NULL, NULL,
 2032             sense_size,
 2033             1,
 2034             sense_size,
 2035             BUS_DMA_ALLOCNOW,
 2036             NULL, NULL,
 2037             &sc->sense_tag)) {
 2038                 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
 2039                 return (ENOMEM);
 2040         }
 2041         if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
 2042             BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
 2043                 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
 2044                 return (ENOMEM);
 2045         }
 2046         if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
 2047             sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
 2048             BUS_DMA_NOWAIT)) {
 2049                 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
 2050                 return (ENOMEM);
 2051         }
 2052 
 2053         /*
 2054          * Allocate for Event detail structure
 2055          */
 2056         evt_detail_size = sizeof(struct mrsas_evt_detail);
 2057         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2058             1, 0,
 2059             BUS_SPACE_MAXADDR_32BIT,
 2060             BUS_SPACE_MAXADDR,
 2061             NULL, NULL,
 2062             evt_detail_size,
 2063             1,
 2064             evt_detail_size,
 2065             BUS_DMA_ALLOCNOW,
 2066             NULL, NULL,
 2067             &sc->evt_detail_tag)) {
 2068                 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
 2069                 return (ENOMEM);
 2070         }
 2071         if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
 2072             BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
 2073                 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
 2074                 return (ENOMEM);
 2075         }
 2076         bzero(sc->evt_detail_mem, evt_detail_size);
 2077         if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
 2078             sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
 2079             &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
 2080                 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
 2081                 return (ENOMEM);
 2082         }
 2083 
 2084         /*
 2085          * Allocate for PD INFO structure
 2086          */
 2087         pd_info_size = sizeof(struct mrsas_pd_info);
 2088         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2089             1, 0,
 2090             BUS_SPACE_MAXADDR_32BIT,
 2091             BUS_SPACE_MAXADDR,
 2092             NULL, NULL,
 2093             pd_info_size,
 2094             1,
 2095             pd_info_size,
 2096             BUS_DMA_ALLOCNOW,
 2097             NULL, NULL,
 2098             &sc->pd_info_tag)) {
 2099                 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
 2100                 return (ENOMEM);
 2101         }
 2102         if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
 2103             BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
 2104                 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
 2105                 return (ENOMEM);
 2106         }
 2107         bzero(sc->pd_info_mem, pd_info_size);
 2108         if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
 2109             sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
 2110             &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
 2111                 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
 2112                 return (ENOMEM);
 2113         }
 2114 
 2115         /*
 2116          * Create a dma tag for data buffers; size will be the maximum
 2117          * possible I/O size (maxphys).
 2118          */
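              /*
               * Unlike the fixed allocations above, buffers described by
               * data_tag are mapped per command at I/O time and may span up
               * to max_num_sge segments; busdma_lock_mutex with io_lock
               * serializes any deferred mapping callbacks.
               */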
 2119         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2120             1,
 2121             0,
 2122             BUS_SPACE_MAXADDR,
 2123             BUS_SPACE_MAXADDR,
 2124             NULL, NULL,
 2125             maxphys,
 2126             sc->max_num_sge,            /* nsegments */
 2127             maxphys,
 2128             BUS_DMA_ALLOCNOW,
 2129             busdma_lock_mutex,
 2130             &sc->io_lock,
 2131             &sc->data_tag)) {
 2132                 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
 2133                 return (ENOMEM);
 2134         }
 2135         return (0);
 2136 }
 2137 
 2138 /*
 2139  * mrsas_addr_cb:       Callback function of bus_dmamap_load()
 2140  * input:                       callback argument, machine dependent type
 2141  *                                      that describes DMA segments, number of segments, error code
 2142  *
 2143  * This function is for the driver to receive mapping information resulting
 2144  * from bus_dmamap_load(). Only the address of the first segment is used;
 2145  * it is saved into the callback argument.
 2146  */
 2147 void
 2148 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 2149 {
 2150         bus_addr_t *addr;
 2151 
 2152         addr = arg;
 2153         *addr = segs[0].ds_addr;
 2154 }
 2155 
 2156 /*
 2157  * mrsas_setup_raidmap: Set up RAID map.
 2158  * input:                               Adapter instance soft state
 2159  *
 2160  * Allocate DMA memory for the RAID maps and perform setup.
 2161  */
 2162 static int
 2163 mrsas_setup_raidmap(struct mrsas_softc *sc)
 2164 {
 2165         int i;
 2166 
 2167         for (i = 0; i < 2; i++) {
 2168                 sc->ld_drv_map[i] =
 2169                     (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
 2170                 /* Do Error handling */
 2171                 if (!sc->ld_drv_map[i]) {
 2172                         device_printf(sc->mrsas_dev, "Could not allocate memory for local map\n");
 2173 
 2174                         if (i == 1)
 2175                                 free(sc->ld_drv_map[0], M_MRSAS);
 2176                         /* ABORT driver initialization */
 2177                         goto ABORT;
 2178                 }
 2179         }
 2180 
 2181         for (int i = 0; i < 2; i++) {
 2182                 if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2183                     4, 0,
 2184                     BUS_SPACE_MAXADDR_32BIT,
 2185                     BUS_SPACE_MAXADDR,
 2186                     NULL, NULL,
 2187                     sc->max_map_sz,
 2188                     1,
 2189                     sc->max_map_sz,
 2190                     BUS_DMA_ALLOCNOW,
 2191                     NULL, NULL,
 2192                     &sc->raidmap_tag[i])) {
 2193                         device_printf(sc->mrsas_dev,
 2194                             "Cannot allocate raid map tag.\n");
 2195                         return (ENOMEM);
 2196                 }
 2197                 if (bus_dmamem_alloc(sc->raidmap_tag[i],
 2198                     (void **)&sc->raidmap_mem[i],
 2199                     BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
 2200                         device_printf(sc->mrsas_dev,
 2201                             "Cannot allocate raidmap memory.\n");
 2202                         return (ENOMEM);
 2203                 }
 2204                 bzero(sc->raidmap_mem[i], sc->max_map_sz);
 2205 
 2206                 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
 2207                     sc->raidmap_mem[i], sc->max_map_sz,
 2208                     mrsas_addr_cb, &sc->raidmap_phys_addr[i],
 2209                     BUS_DMA_NOWAIT)) {
 2210                         device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
 2211                         return (ENOMEM);
 2212                 }
 2213                 if (!sc->raidmap_mem[i]) {
 2214                         device_printf(sc->mrsas_dev,
 2215                             "Cannot allocate memory for raid map.\n");
 2216                         return (ENOMEM);
 2217                 }
 2218         }
 2219 
 2220         if (!mrsas_get_map_info(sc))
 2221                 mrsas_sync_map_info(sc);
 2222 
 2223         return (0);
 2224 
 2225 ABORT:
 2226         return (1);
 2227 }
 2228 
 2229 /**
 2230  * megasas_setup_jbod_map -     setup jbod map for FP seq_number.
 2231  * @sc:                         Adapter soft state
 2232  *
 2233  * Sets sc->use_seqnum_jbod_fp based on firmware support and map sync status.
 2234  */
 2235 void
 2236 megasas_setup_jbod_map(struct mrsas_softc *sc)
 2237 {
 2238         int i;
 2239         uint32_t pd_seq_map_sz;
 2240 
 2241         pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 2242             (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
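              /*
               * The sync buffer holds one MR_PD_CFG_SEQ entry per physical
               * device; the base structure already accounts for the first
               * entry, hence MAX_PHYSICAL_DEVICES - 1 additional entries.
               */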
 2243 
 2244         if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
 2245                 sc->use_seqnum_jbod_fp = 0;
 2246                 return;
 2247         }
 2248         if (sc->jbodmap_mem[0])
 2249                 goto skip_alloc;
 2250 
 2251         for (i = 0; i < 2; i++) {
 2252                 if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2253                     4, 0,
 2254                     BUS_SPACE_MAXADDR_32BIT,
 2255                     BUS_SPACE_MAXADDR,
 2256                     NULL, NULL,
 2257                     pd_seq_map_sz,
 2258                     1,
 2259                     pd_seq_map_sz,
 2260                     BUS_DMA_ALLOCNOW,
 2261                     NULL, NULL,
 2262                     &sc->jbodmap_tag[i])) {
 2263                         device_printf(sc->mrsas_dev,
 2264                             "Cannot allocate jbod map tag.\n");
 2265                         return;
 2266                 }
 2267                 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
 2268                     (void **)&sc->jbodmap_mem[i],
 2269                     BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
 2270                         device_printf(sc->mrsas_dev,
 2271                             "Cannot allocate jbod map memory.\n");
 2272                         return;
 2273                 }
 2274                 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
 2275 
 2276                 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
 2277                     sc->jbodmap_mem[i], pd_seq_map_sz,
 2278                     mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
 2279                     BUS_DMA_NOWAIT)) {
 2280                         device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
 2281                         return;
 2282                 }
 2283                 if (!sc->jbodmap_mem[i]) {
 2284                         device_printf(sc->mrsas_dev,
 2285                             "Cannot allocate memory for jbod map.\n");
 2286                         sc->use_seqnum_jbod_fp = 0;
 2287                         return;
 2288                 }
 2289         }
 2290 
 2291 skip_alloc:
 2292         if (!megasas_sync_pd_seq_num(sc, false) &&
 2293             !megasas_sync_pd_seq_num(sc, true))
 2294                 sc->use_seqnum_jbod_fp = 1;
 2295         else
 2296                 sc->use_seqnum_jbod_fp = 0;
 2297 
 2298         device_printf(sc->mrsas_dev, "Jbod map is supported\n");
 2299 }
 2300 
 2301 /*
 2302  * mrsas_init_fw:       Initialize Firmware
 2303  * input:                       Adapter soft state
 2304  *
 2305  * Calls transition_to_ready() to make sure Firmware is in operational state and
 2306  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
 2307  * issues internal commands to get the controller info after the IOC_INIT
 2308  * command response is received from the Firmware.  Note: the code relating to
 2309  * get_pdlist, get_ld_list and max_sectors is currently not being used; it
 2310  * is left here as a placeholder.
 2311  */
 2312 static int
 2313 mrsas_init_fw(struct mrsas_softc *sc)
 2314 {
 2315 
 2316         int ret, loop, ocr = 0;
 2317         u_int32_t max_sectors_1;
 2318         u_int32_t max_sectors_2;
 2319         u_int32_t tmp_sectors;
 2320         u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
 2321         int msix_enable = 0;
 2322         int fw_msix_count = 0;
 2323         int i, j;
 2324 
 2325         /* Make sure Firmware is ready */
 2326         ret = mrsas_transition_to_ready(sc, ocr);
 2327         if (ret != SUCCESS) {
 2328                 return (ret);
 2329         }
 2330         if (sc->is_ventura || sc->is_aero) {
 2331                 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
 2332 #if VD_EXT_DEBUG
 2333                 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
 2334 #endif
 2335                 sc->maxRaidMapSize = ((scratch_pad_3 >>
 2336                     MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
 2337                     MR_MAX_RAID_MAP_SIZE_MASK);
 2338         }
 2339         /* MSI-x index 0- reply post host index register */
 2340         sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
 2341         /* Check if MSI-X is supported while in ready state */
 2342         msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
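              /* Bit 26 (0x4000000) of outbound_scratch_pad advertises MSI-X support. */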
 2343 
 2344         if (msix_enable) {
 2345                 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 2346                     outbound_scratch_pad_2));
 2347 
 2348                 /* Check max MSI-X vectors */
 2349                 if (sc->device_id == MRSAS_TBOLT) {
 2350                         sc->msix_vectors = (scratch_pad_2
 2351                             & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
 2352                         fw_msix_count = sc->msix_vectors;
 2353                 } else {
 2354                         /* Invader/Fury supports 96 MSI-X vectors */
 2355                         sc->msix_vectors = ((scratch_pad_2
 2356                             & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
 2357                             >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
 2358                         fw_msix_count = sc->msix_vectors;
 2359 
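                              /*
                               * Controllers with many vectors use the combined
                               * reply post host index registers, one register
                               * per group of eight queues; see the reply post
                               * writes in mrsas_complete_cmd().
                               */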
 2360                         if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
 2361                                 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
 2362                                 sc->msix_combined = true;
 2363                         /*
 2364                          * Save 1-15 reply post index
 2365                          * address to local memory Index 0
 2366                          * is already saved from reg offset
 2367                          * MPI2_REPLY_POST_HOST_INDEX_OFFSET
 2368                          */
 2369                         for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
 2370                             loop++) {
 2371                                 sc->msix_reg_offset[loop] =
 2372                                     MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
 2373                                     (loop * 0x10);
 2374                         }
 2375                 }
 2376 
 2377                 /* Don't bother allocating more MSI-X vectors than cpus */
 2378                 sc->msix_vectors = min(sc->msix_vectors,
 2379                     mp_ncpus);
 2380 
 2381                 /* Allocate MSI-x vectors */
 2382                 if (mrsas_allocate_msix(sc) == SUCCESS)
 2383                         sc->msix_enable = 1;
 2384                 else
 2385                         sc->msix_enable = 0;
 2386 
 2387                 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vectors, "
 2388                     "online CPUs %d, current MSIX <%d>\n",
 2389                     fw_msix_count, mp_ncpus, sc->msix_vectors);
 2390         }
 2391         /*
 2392          * MSI-X host index 0 is common to all adapters.
 2393          * It is used for all MPT-based adapters.
 2394          */
 2395         if (sc->msix_combined) {
 2396                 sc->msix_reg_offset[0] =
 2397                     MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
 2398         }
 2399         if (mrsas_init_adapter(sc) != SUCCESS) {
 2400                 device_printf(sc->mrsas_dev, "Adapter initialization failed.\n");
 2401                 return (1);
 2402         }
 2403 
 2404         if (sc->is_ventura || sc->is_aero) {
 2405                 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 2406                     outbound_scratch_pad_4));
 2407                 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
 2408                         sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
 2409 
 2410                 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
 2411         }
 2412 
 2413         /* Allocate internal commands for pass-thru */
 2414         if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
 2415                 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
 2416                 return (1);
 2417         }
 2418         sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
 2419         if (!sc->ctrl_info) {
 2420                 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
 2421                 return (1);
 2422         }
 2423         /*
 2424          * Get the controller info from FW, so that the MAX VD support
 2425          * availability can be decided.
 2426          */
 2427         if (mrsas_get_ctrl_info(sc)) {
 2428                 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
 2429                 return (1);
 2430         }
 2431         sc->secure_jbod_support =
 2432             (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
 2433 
 2434         if (sc->secure_jbod_support)
 2435                 device_printf(sc->mrsas_dev, "FW supports SED \n");
 2436 
 2437         if (sc->use_seqnum_jbod_fp)
 2438                 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
 2439 
 2440         if (sc->support_morethan256jbod)
 2441                 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
 2442 
 2443         if (mrsas_setup_raidmap(sc) != SUCCESS) {
 2444                 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED! "
 2445                     "There seems to be a problem with the controller.\n"
 2446                     "Please contact the support team if the problem persists.\n");
 2447         }
 2448         megasas_setup_jbod_map(sc);
 2449 
 2450         memset(sc->target_list, 0,
 2451                 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
 2452         for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
 2453                 sc->target_list[i].target_id = 0xffff;
 2454 
 2455         /* For pass-thru, get PD/LD list and controller info */
 2456         memset(sc->pd_list, 0,
 2457             MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
 2458         if (mrsas_get_pd_list(sc) != SUCCESS) {
 2459                 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
 2460                 return (1);
 2461         }
 2462         memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
 2463         if (mrsas_get_ld_list(sc) != SUCCESS) {
 2464                 device_printf(sc->mrsas_dev, "Get LD list failed.\n");
 2465                 return (1);
 2466         }
 2467 
 2468         if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
 2469                 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
 2470                                                 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
 2471                 if (!sc->streamDetectByLD) {
 2472                         device_printf(sc->mrsas_dev,
 2473                                 "unable to allocate stream detection for pool of LDs\n");
 2474                         return (1);
 2475                 }
 2476                 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
 2477                         sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
 2478                         if (!sc->streamDetectByLD[i]) {
 2479                                 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
 2480                                 for (j = 0; j < i; ++j)
 2481                                         free(sc->streamDetectByLD[j], M_MRSAS);
 2482                                 free(sc->streamDetectByLD, M_MRSAS);
 2483                                 sc->streamDetectByLD = NULL;
 2484                                 return (1);
 2485                         }
 2486                         memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
 2487                         sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
 2488                 }
 2489         }
 2490 
 2491         /*
 2492          * Compute the max allowed sectors per IO: The controller info has
 2493          * two limits on max sectors. Driver should use the minimum of these
 2494          * two.
 2495          *
 2496          * 1 << stripe_sz_ops.min = max sectors per strip
 2497          *
 2498          * Note that older firmware (< FW ver 30) didn't report the information
 2499          * needed to calculate max_sectors_1, so the number always ended up as zero.
 2500          */
 2501         tmp_sectors = 0;
 2502         max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
 2503             sc->ctrl_info->max_strips_per_io;
 2504         max_sectors_2 = sc->ctrl_info->max_request_size;
 2505         tmp_sectors = min(max_sectors_1, max_sectors_2);
 2506         sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;
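              /*
               * The scatter/gather limit caps a request at (max_num_sge - 1)
               * pages; dividing by 512 converts that byte count to sectors.
               * The firmware-reported limit computed above clamps it further
               * below.
               */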
 2507 
 2508         if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
 2509                 sc->max_sectors_per_req = tmp_sectors;
 2510 
 2511         sc->disableOnlineCtrlReset =
 2512             sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
 2513         sc->UnevenSpanSupport =
 2514             sc->ctrl_info->adapterOperations2.supportUnevenSpans;
 2515         if (sc->UnevenSpanSupport) {
 2516                 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
 2517                     sc->UnevenSpanSupport);
 2518 
 2519                 if (MR_ValidateMapInfo(sc))
 2520                         sc->fast_path_io = 1;
 2521                 else
 2522                         sc->fast_path_io = 0;
 2523         }
 2524                 
 2525         device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
 2526                 sc->max_fw_cmds, sc->max_scsi_cmds);
 2527         return (0);
 2528 }
 2529 
 2530 /*
 2531  * mrsas_init_adapter:  Initializes the adapter/controller
 2532  * input:                               Adapter soft state
 2533  *
 2534  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 2535  * ROC/controller.  The FW register is read to determine the number of
 2536  * commands that are supported.  All memory allocations for IO are based on
 2537  * max_cmd.  Appropriate calculations are performed in this function.
 2538  */
 2539 int
 2540 mrsas_init_adapter(struct mrsas_softc *sc)
 2541 {
 2542         uint32_t status;
 2543         u_int32_t scratch_pad_2;
 2544         int ret;
 2545         int i = 0;
 2546 
 2547         /* Read FW status register */
 2548         status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
 2549 
 2550         sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
 2551 
 2552         /* Decrement the max supported by 1, to correlate with FW */
 2553         sc->max_fw_cmds = sc->max_fw_cmds - 1;
 2554         sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
 2555 
 2556         /* Determine allocation size of command frames */
 2557         sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
 2558         sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
 2559         sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
 2560         sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
 2561             (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
 2562         scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 2563             outbound_scratch_pad_2));
 2564 
 2565         mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
 2566             "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
 2567             "sc->io_frames_alloc_sz 0x%x\n", __func__,
 2568             sc->reply_q_depth, sc->request_alloc_sz,
 2569             sc->reply_alloc_sz, sc->io_frames_alloc_sz);
 2570 
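        /*
         * Editorial worked example (illustrative value only): the reply queue
         * depth is the command count rounded up to a multiple of 16 and then
         * doubled.  Assuming max_fw_cmds = 1008:
         *     reply_q_depth = ((1008 + 1 + 15) / 16 * 16) * 2 = 1024 * 2 = 2048
         */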
 2571         /*
 2572          * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
 2573          * firmware supports an extended IO chain frame that is 4 times larger
 2574          * than the legacy one: legacy firmware - frame size (8 * 128) = 1K;
 2575          * 1M IO firmware - frame size (8 * 128 * 4) = 4K.
 2576          */
 2577         if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
 2578                 sc->max_chain_frame_sz =
 2579                     ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
 2580                     * MEGASAS_1MB_IO;
 2581         else
 2582                 sc->max_chain_frame_sz =
 2583                     ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
 2584                     * MEGASAS_256K_IO;
 2585 
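        /*
         * Editorial worked example (using the figures from the comment above):
         * if the size field decoded from scratch_pad_2 is 8, a legacy
         * controller gets a chain frame of 8 * 128 = 1024 bytes (1K), while a
         * controller that sets MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK gets
         * 8 * 128 * 4 = 4096 bytes (4K), i.e. four times as many chain SGEs.
         */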
 2586         sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
 2587         sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
 2588             offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
 2589 
 2590         sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
 2591         sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
 2592 
 2593         mrsas_dprint(sc, MRSAS_INFO,
 2594             "max sge: 0x%x, max chain frame size: 0x%x, "
 2595             "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
 2596             sc->max_num_sge,
 2597             sc->max_chain_frame_sz, sc->max_fw_cmds,
 2598             sc->chain_frames_alloc_sz);
 2599 
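        /*
         * Editorial worked example (assuming the default IO frame is 256 bytes,
         * the SGL begins 128 bytes into MRSAS_RAID_SCSI_IO_REQUEST, and each
         * MPI2_SGE_IO_UNION is 16 bytes): max_sge_in_main_msg =
         * (256 - 128) / 16 = 8; with a 1K chain frame, max_sge_in_chain =
         * 1024 / 16 = 64; so max_num_sge = 8 + 64 - 2 = 70.
         */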
 2600         /* Used for pass thru MFI frame (DCMD) */
 2601         sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
 2602 
 2603         sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
 2604             sizeof(MPI2_SGE_IO_UNION)) / 16;
 2605 
 2606         int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 2607 
 2608         for (i = 0; i < count; i++)
 2609                 sc->last_reply_idx[i] = 0;
 2610 
 2611         ret = mrsas_alloc_mem(sc);
 2612         if (ret != SUCCESS)
 2613                 return (ret);
 2614 
 2615         ret = mrsas_alloc_mpt_cmds(sc);
 2616         if (ret != SUCCESS)
 2617                 return (ret);
 2618 
 2619         ret = mrsas_ioc_init(sc);
 2620         if (ret != SUCCESS)
 2621                 return (ret);
 2622 
 2623         return (0);
 2624 }
 2625 
 2626 /*
 2627  * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 2628  * input:                               Adapter soft state
 2629  *
 2630  * Allocates DMAable memory for the IOC Init cmd sent to the FW to initialize the ROC/controller.
 2631  */
 2632 int
 2633 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
 2634 {
 2635         int ioc_init_size;
 2636 
 2637         /* Allocate IOC INIT command */
 2638         ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
 2639         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 2640             1, 0,
 2641             BUS_SPACE_MAXADDR_32BIT,
 2642             BUS_SPACE_MAXADDR,
 2643             NULL, NULL,
 2644             ioc_init_size,
 2645             1,
 2646             ioc_init_size,
 2647             BUS_DMA_ALLOCNOW,
 2648             NULL, NULL,
 2649             &sc->ioc_init_tag)) {
 2650                 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
 2651                 return (ENOMEM);
 2652         }
 2653         if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
 2654             BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
 2655                 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
 2656                 return (ENOMEM);
 2657         }
 2658         bzero(sc->ioc_init_mem, ioc_init_size);
 2659         if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
 2660             sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
 2661             &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
 2662                 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
 2663                 return (ENOMEM);
 2664         }
 2665         return (0);
 2666 }
 2667 
 2668 /*
 2669  * mrsas_free_ioc_cmd:  Frees memory for IOC Init command
 2670  * input:                               Adapter soft state
 2671  *
 2672  * Deallocates memory of the IOC Init cmd.
 2673  */
 2674 void
 2675 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
 2676 {
 2677         if (sc->ioc_init_phys_mem)
 2678                 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
 2679         if (sc->ioc_init_mem != NULL)
 2680                 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
 2681         if (sc->ioc_init_tag != NULL)
 2682                 bus_dma_tag_destroy(sc->ioc_init_tag);
 2683 }
 2684 
 2685 /*
 2686  * mrsas_ioc_init:      Sends IOC Init command to FW
 2687  * input:                       Adapter soft state
 2688  *
 2689  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 2690  */
 2691 int
 2692 mrsas_ioc_init(struct mrsas_softc *sc)
 2693 {
 2694         struct mrsas_init_frame *init_frame;
 2695         pMpi2IOCInitRequest_t IOCInitMsg;
 2696         MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
 2697         u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
 2698         bus_addr_t phys_addr;
 2699         int i, retcode = 0;
 2700         u_int32_t scratch_pad_2;
 2701 
 2702         /* Allocate memory for the IOC INIT command */
 2703         if (mrsas_alloc_ioc_cmd(sc)) {
 2704                 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
 2705                 return (1);
 2706         }
 2707 
 2708         if (!sc->block_sync_cache) {
 2709                 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 2710                     outbound_scratch_pad_2));
 2711                 sc->fw_sync_cache_support = (scratch_pad_2 &
 2712                     MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
 2713         }
 2714 
 2715         IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
 2716         IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
 2717         IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
 2718         IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
 2719         IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
 2720         IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
 2721         IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
 2722         IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
 2723         IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
 2724         IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
 2725         IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
 2726 
 2727         init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
 2728         init_frame->cmd = MFI_CMD_INIT;
 2729         init_frame->cmd_status = 0xFF;
 2730         init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 2731 
 2732         /* driver supports Extended MSI-X */
 2733         if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
 2734                 init_frame->driver_operations.
 2735                     mfi_capabilities.support_additional_msix = 1;
 2736         }
 2737         if (sc->verbuf_mem) {
 2738                 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
 2739                     MRSAS_VERSION);
 2740                 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
 2741                 init_frame->driver_ver_hi = 0;
 2742         }
 2743         init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
 2744         init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
 2745         init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
 2746         if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
 2747                 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
 2748 
 2749         init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);
 2750 
 2751         phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
 2752         init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
 2753         init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));
 2754 
 2755         req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
 2756         req_desc.MFAIo.RequestFlags =
 2757             (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 2758 
 2759         mrsas_disable_intr(sc);
 2760         mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
 2761         mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);
 2762 
 2763         /*
 2764          * Poll cmd_status to wait for the firmware response.  While this
 2765          * polling loop with the DELAY call could block the CPU, each
 2766          * interval is only 1 millisecond.
 2767          */
 2768         if (init_frame->cmd_status == 0xFF) {
 2769                 for (i = 0; i < (max_wait * 1000); i++) {
 2770                         if (init_frame->cmd_status == 0xFF)
 2771                                 DELAY(1000);
 2772                         else
 2773                                 break;
 2774                 }
 2775         }
 2776         if (init_frame->cmd_status == 0)
 2777                 mrsas_dprint(sc, MRSAS_OCR,
 2778                     "IOC INIT response received from FW.\n");
 2779         else {
 2780                 if (init_frame->cmd_status == 0xFF)
 2781                         device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
 2782                 else
 2783                         device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
 2784                 retcode = 1;
 2785         }
 2786 
 2787         if (sc->is_aero) {
 2788                 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 2789                     outbound_scratch_pad_2));
 2790                 sc->atomic_desc_support = (scratch_pad_2 &
 2791                         MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
 2792                 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
 2793                         sc->atomic_desc_support ? "Yes" : "No");
 2794         }
 2795 
 2796         mrsas_free_ioc_cmd(sc);
 2797         return (retcode);
 2798 }
 2799 
 2800 /*
 2801  * mrsas_alloc_mpt_cmds:        Allocates the command packets
 2802  * input:                                       Adapter instance soft state
 2803  *
 2804  * This function allocates the internal commands for IOs. Each command that is
 2805  * issued to the FW is wrapped in a local data structure called mrsas_mpt_cmd.
 2806  * An array of mrsas_mpt_cmd pointers is allocated, and the free commands are
 2807  * maintained in a linked list (cmd pool). SMID values range from 1 to
 2808  * max_fw_cmds.
 2809  */
 2810 int
 2811 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
 2812 {
 2813         int i, j;
 2814         u_int32_t max_fw_cmds, count;
 2815         struct mrsas_mpt_cmd *cmd;
 2816         pMpi2ReplyDescriptorsUnion_t reply_desc;
 2817         u_int32_t offset, chain_offset, sense_offset;
 2818         bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
 2819         u_int8_t *io_req_base, *chain_frame_base, *sense_base;
 2820 
 2821         max_fw_cmds = sc->max_fw_cmds;
 2822 
 2823         sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
 2824         if (!sc->req_desc) {
 2825                 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
 2826                 return (ENOMEM);
 2827         }
 2828         memset(sc->req_desc, 0, sc->request_alloc_sz);
 2829 
 2830         /*
 2831          * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
 2832          * Allocate the dynamic array first and then allocate individual
 2833          * commands.
 2834          */
 2835         sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
 2836             M_MRSAS, M_NOWAIT);
 2837         if (!sc->mpt_cmd_list) {
 2838                 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
 2839                 return (ENOMEM);
 2840         }
 2841         memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
 2842         for (i = 0; i < max_fw_cmds; i++) {
 2843                 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
 2844                     M_MRSAS, M_NOWAIT);
 2845                 if (!sc->mpt_cmd_list[i]) {
 2846                         for (j = 0; j < i; j++)
 2847                                 free(sc->mpt_cmd_list[j], M_MRSAS);
 2848                         free(sc->mpt_cmd_list, M_MRSAS);
 2849                         sc->mpt_cmd_list = NULL;
 2850                         return (ENOMEM);
 2851                 }
 2852         }
 2853 
 2854         io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
 2855         io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
 2856         chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
 2857         chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
 2858         sense_base = (u_int8_t *)sc->sense_mem;
 2859         sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
 2860         for (i = 0; i < max_fw_cmds; i++) {
 2861                 cmd = sc->mpt_cmd_list[i];
 2862                 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
 2863                 chain_offset = sc->max_chain_frame_sz * i;
 2864                 sense_offset = MRSAS_SENSE_LEN * i;
 2865                 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
 2866                 cmd->index = i + 1;
 2867                 cmd->ccb_ptr = NULL;
 2868                 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 2869                 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
 2870                 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
 2871                 cmd->sc = sc;
 2872                 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
 2873                 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
 2874                 cmd->io_request_phys_addr = io_req_base_phys + offset;
 2875                 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
 2876                 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
 2877                 cmd->sense = sense_base + sense_offset;
 2878                 cmd->sense_phys_addr = sense_base_phys + sense_offset;
 2879                 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
 2880                         return (FAIL);
 2881                 }
 2882                 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
 2883         }
 2884 
 2885         /* Initialize reply descriptor array to 0xFFFFFFFF */
 2886         reply_desc = sc->reply_desc_mem;
 2887         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 2888         for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
 2889                 reply_desc->Words = MRSAS_ULONG_MAX;
 2890         }
 2891         return (0);
 2892 }
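/*
 * Editorial note: the commands set up above are consumed from the TAILQ pool
 * in the same fashion as the MFI pool is consumed by mrsas_get_mfi_cmd()
 * below.  A minimal sketch of such an accessor, assuming the pool is guarded
 * by sc->mpt_cmd_pool_lock (the driver's real accessor may differ):
 *
 *	static struct mrsas_mpt_cmd *
 *	example_get_mpt_cmd(struct mrsas_softc *sc)
 *	{
 *		struct mrsas_mpt_cmd *cmd = NULL;
 *
 *		mtx_lock(&sc->mpt_cmd_pool_lock);
 *		if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
 *			cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
 *			TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
 *		}
 *		mtx_unlock(&sc->mpt_cmd_pool_lock);
 *		return (cmd);
 *	}
 */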
 2893 
 2894 /*
 2895  * mrsas_write_64bit_req_desc:  Writes a 64-bit request descriptor to FW
 2896  * input:                       Adapter softstate
 2897  *                              request descriptor address low
 2898  *                              request descriptor address high
 2899  */
 2900 void
 2901 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
 2902     u_int32_t req_desc_hi)
 2903 {
 2904         mtx_lock(&sc->pci_lock);
 2905         mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
 2906             le32toh(req_desc_lo));
 2907         mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
 2908             le32toh(req_desc_hi));
 2909         mtx_unlock(&sc->pci_lock);
 2910 }
 2911 
 2912 /*
 2913  * mrsas_fire_cmd:      Sends command to FW
 2914  * input:               Adapter softstate
 2915  *                      request descriptor address low
 2916  *                      request descriptor address high
 2917  *
 2918  * This function fires the command to the firmware by writing to the
 2919  * inbound_single_queue_port (atomic) or the inbound_low/high_queue_port pair.
 2920  */
 2921 void
 2922 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
 2923     u_int32_t req_desc_hi)
 2924 {
 2925         if (sc->atomic_desc_support)
 2926                 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
 2927                     le32toh(req_desc_lo));
 2928         else
 2929                 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
 2930 }
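/*
 * Editorial note: a minimal sketch of the usual calling pattern, mirroring the
 * re-fire path in mrsas_reset_ctrl() below ("smid" is a hypothetical variable
 * holding the command's SMID):
 *
 *	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 *
 *	req_desc = mrsas_get_request_desc(sc, smid - 1);
 *	if (req_desc != NULL)
 *		mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
 */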
 2931 
 2932 /*
 2933  * mrsas_transition_to_ready:  Move FW to Ready state
 2934  * input:                       Adapter instance soft state
 2935  *
 2936  * During initialization, the FW can potentially be in any one of several
 2937  * possible states. If the FW is in the operational or waiting-for-handshake
 2938  * state, the driver must take steps to bring it to the ready state.
 2939  * Otherwise, it simply waits for the FW to reach the ready state.
 2940  */
 2941 int
 2942 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
 2943 {
 2944         int i;
 2945         u_int8_t max_wait;
 2946         u_int32_t val, fw_state;
 2947         u_int32_t cur_state __unused;
 2948         u_int32_t abs_state, curr_abs_state;
 2949 
 2950         val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
 2951         fw_state = val & MFI_STATE_MASK;
 2952         max_wait = MRSAS_RESET_WAIT_TIME;
 2953 
 2954         if (fw_state != MFI_STATE_READY)
 2955                 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
 2956 
 2957         while (fw_state != MFI_STATE_READY) {
 2958                 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
 2959                 switch (fw_state) {
 2960                 case MFI_STATE_FAULT:
 2961                         device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
 2962                         if (ocr) {
 2963                                 cur_state = MFI_STATE_FAULT;
 2964                                 break;
 2965                         } else
 2966                                 return -ENODEV;
 2967                 case MFI_STATE_WAIT_HANDSHAKE:
 2968                         /* Set the CLR bit in inbound doorbell */
 2969                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
 2970                             MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
 2971                         cur_state = MFI_STATE_WAIT_HANDSHAKE;
 2972                         break;
 2973                 case MFI_STATE_BOOT_MESSAGE_PENDING:
 2974                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
 2975                             MFI_INIT_HOTPLUG);
 2976                         cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
 2977                         break;
 2978                 case MFI_STATE_OPERATIONAL:
 2979                         /*
 2980                          * Bring it to READY state; assuming max wait 10
 2981                          * secs
 2982                          */
 2983                         mrsas_disable_intr(sc);
 2984                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
 2985                         for (i = 0; i < max_wait * 1000; i++) {
 2986                                 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
 2987                                         DELAY(1000);
 2988                                 else
 2989                                         break;
 2990                         }
 2991                         cur_state = MFI_STATE_OPERATIONAL;
 2992                         break;
 2993                 case MFI_STATE_UNDEFINED:
 2994                         /*
 2995                          * This state should not last for more than 2
 2996                          * seconds
 2997                          */
 2998                         cur_state = MFI_STATE_UNDEFINED;
 2999                         break;
 3000                 case MFI_STATE_BB_INIT:
 3001                         cur_state = MFI_STATE_BB_INIT;
 3002                         break;
 3003                 case MFI_STATE_FW_INIT:
 3004                         cur_state = MFI_STATE_FW_INIT;
 3005                         break;
 3006                 case MFI_STATE_FW_INIT_2:
 3007                         cur_state = MFI_STATE_FW_INIT_2;
 3008                         break;
 3009                 case MFI_STATE_DEVICE_SCAN:
 3010                         cur_state = MFI_STATE_DEVICE_SCAN;
 3011                         break;
 3012                 case MFI_STATE_FLUSH_CACHE:
 3013                         cur_state = MFI_STATE_FLUSH_CACHE;
 3014                         break;
 3015                 default:
 3016                         device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
 3017                         return -ENODEV;
 3018                 }
 3019 
 3020                 /*
 3021                  * The cur_state should not last for more than max_wait secs
 3022                  */
 3023                 for (i = 0; i < (max_wait * 1000); i++) {
 3024                         fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3025                             outbound_scratch_pad)) & MFI_STATE_MASK);
 3026                         curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3027                             outbound_scratch_pad));
 3028                         if (abs_state == curr_abs_state)
 3029                                 DELAY(1000);
 3030                         else
 3031                                 break;
 3032                 }
 3033 
 3034                 /*
 3035                  * Return error if fw_state hasn't changed after max_wait
 3036                  */
 3037                 if (curr_abs_state == abs_state) {
 3038                         device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
 3039                             "in %d secs\n", fw_state, max_wait);
 3040                         return -ENODEV;
 3041                 }
 3042         }
 3043         mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
 3044         return 0;
 3045 }
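/*
 * Editorial note: a minimal usage sketch mirroring the OCR path in
 * mrsas_reset_ctrl() below, where ocr is 1 and any non-zero return is treated
 * as a failed transition (the error handling here is illustrative only):
 *
 *	if (mrsas_transition_to_ready(sc, 1)) {
 *		mrsas_dprint(sc, MRSAS_OCR,
 *		    "Failed to transition controller to ready.\n");
 *		return (FAIL);
 *	}
 */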
 3046 
 3047 /*
 3048  * mrsas_get_mfi_cmd:   Get a cmd from free command pool
 3049  * input:                               Adapter soft state
 3050  *
 3051  * This function removes an MFI command from the command list.
 3052  */
 3053 struct mrsas_mfi_cmd *
 3054 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
 3055 {
 3056         struct mrsas_mfi_cmd *cmd = NULL;
 3057 
 3058         mtx_lock(&sc->mfi_cmd_pool_lock);
 3059         if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
 3060                 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
 3061                 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
 3062         }
 3063         mtx_unlock(&sc->mfi_cmd_pool_lock);
 3064 
 3065         return cmd;
 3066 }
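/*
 * Editorial note: a minimal usage sketch pairing this accessor with
 * mrsas_release_mfi_cmd() further below, as done by mrsas_get_ctrl_info()
 * (frame setup and error handling elided):
 *
 *	struct mrsas_mfi_cmd *cmd;
 *
 *	cmd = mrsas_get_mfi_cmd(sc);
 *	if (cmd == NULL)
 *		return (-ENOMEM);
 *	... fill in cmd->frame and issue the command ...
 *	mrsas_release_mfi_cmd(cmd);
 */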
 3067 
 3068 /*
 3069  * mrsas_ocr_thread:    Thread to handle OCR/Kill Adapter.
 3070  * input:                               Adapter Context.
 3071  *
 3072  * This function checks the FW status register and the do_timedout_reset flag.
 3073  * It performs OCR/Kill adapter if the FW is in fault state or an IO timeout
 3074  * has triggered a reset.
 3075  */
 3076 static void
 3077 mrsas_ocr_thread(void *arg)
 3078 {
 3079         struct mrsas_softc *sc;
 3080         u_int32_t fw_status, fw_state;
 3081         u_int8_t tm_target_reset_failed = 0;
 3082 
 3083         sc = (struct mrsas_softc *)arg;
 3084 
 3085         mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
 3086         sc->ocr_thread_active = 1;
 3087         mtx_lock(&sc->sim_lock);
 3088         for (;;) {
 3089                 /* Sleep for 1 second and check the queue status */
 3090                 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
 3091                     "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
 3092                 if (sc->remove_in_progress ||
 3093                     sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
 3094                         mrsas_dprint(sc, MRSAS_OCR,
 3095                             "Exit due to %s from %s\n",
 3096                             sc->remove_in_progress ? "Shutdown" :
 3097                             "Hardware critical error", __func__);
 3098                         break;
 3099                 }
 3100                 fw_status = mrsas_read_reg_with_retries(sc,
 3101                     offsetof(mrsas_reg_set, outbound_scratch_pad));
 3102                 fw_state = fw_status & MFI_STATE_MASK;
 3103                 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
 3104                         mrsas_atomic_read(&sc->target_reset_outstanding)) {
 3105                         /* First, freeze further IOs to come to the SIM */
 3106                         mrsas_xpt_freeze(sc);
 3107 
 3108                         /* If this is an IO timeout then go for target reset */
 3109                         if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
 3110                                 device_printf(sc->mrsas_dev, "Initiating Target RESET "
 3111                                     "because of SCSI IO timeout!\n");
 3112 
 3113                                 /* Let the remaining IOs complete */
 3114                                 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
 3115                                       "mrsas_reset_targets", 5 * hz);
 3116 
 3117                                 /* Try to reset the target device */
 3118                                 if (mrsas_reset_targets(sc) == FAIL)
 3119                                         tm_target_reset_failed = 1;
 3120                         }
 3121 
 3122                         /* If this is a DCMD timeout or FW fault,
 3123                          * then go for controller reset
 3124                          */
 3125                         if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
 3126                             (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
 3127                                 if (tm_target_reset_failed)
 3128                                         device_printf(sc->mrsas_dev, "Initiating OCR because of "
 3129                                             "TM FAILURE!\n");
 3130                                 else
 3131                                         device_printf(sc->mrsas_dev, "Initiating OCR "
 3132                                                 "because of %s!\n", sc->do_timedout_reset ?
 3133                                                 "DCMD IO Timeout" : "FW fault");
 3134 
 3135                                 mtx_lock_spin(&sc->ioctl_lock);
 3136                                 sc->reset_in_progress = 1;
 3137                                 mtx_unlock_spin(&sc->ioctl_lock);
 3138                                 sc->reset_count++;
 3139                                 
 3140                                 /*
 3141                                  * Wait for the AEN task to be completed if it is running.
 3142                                  */
 3143                                 mtx_unlock(&sc->sim_lock);
 3144                                 taskqueue_drain(sc->ev_tq, &sc->ev_task);
 3145                                 mtx_lock(&sc->sim_lock);
 3146 
 3147                                 taskqueue_block(sc->ev_tq);
 3148                                 /* Try to reset the controller */
 3149                                 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
 3150 
 3151                                 sc->do_timedout_reset = 0;
 3152                                 sc->reset_in_progress = 0;
 3153                                 tm_target_reset_failed = 0;
 3154                                 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
 3155                                 memset(sc->target_reset_pool, 0,
 3156                                     sizeof(sc->target_reset_pool));
 3157                                 taskqueue_unblock(sc->ev_tq);
 3158                         }
 3159 
 3160                         /* Now allow IOs to come to the SIM */
 3161                          mrsas_xpt_release(sc);
 3162                 }
 3163         }
 3164         mtx_unlock(&sc->sim_lock);
 3165         sc->ocr_thread_active = 0;
 3166         mrsas_kproc_exit(0);
 3167 }
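/*
 * Editorial note: the thread above wakes on its own every
 * mrsas_fw_fault_check_delay seconds via the msleep() timeout; other parts of
 * the driver can presumably nudge it earlier by flagging the reason and then
 * calling wakeup() on the same channel.  A hedged sketch of such a request:
 *
 *	sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 *	wakeup(&sc->ocr_chan);
 */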
 3168 
 3169 /*
 3170  * mrsas_reset_reply_desc:      Reset Reply descriptor as part of OCR.
 3171  * input:                                       Adapter Context.
 3172  *
 3173  * This function will clear reply descriptor so that post OCR driver and FW will
 3174  * This function clears the reply descriptors so that, after OCR, the driver
 3175  * and FW do not see stale history.
 3176 void
 3177 mrsas_reset_reply_desc(struct mrsas_softc *sc)
 3178 {
 3179         int i, count;
 3180         pMpi2ReplyDescriptorsUnion_t reply_desc;
 3181 
 3182         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 3183         for (i = 0; i < count; i++)
 3184                 sc->last_reply_idx[i] = 0;
 3185 
 3186         reply_desc = sc->reply_desc_mem;
 3187         for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
 3188                 reply_desc->Words = MRSAS_ULONG_MAX;
 3189         }
 3190 }
 3191 
 3192 /*
 3193  * mrsas_reset_ctrl:    Core function to OCR/Kill adapter.
 3194  * input:                               Adapter Context.
 3195  *
 3196  * This function runs from thread context so that it can sleep.
 3197  * 1. Do not handle OCR if the FW is in a HW critical error state.
 3198  * 2. Wait up to 180 seconds for outstanding commands to complete.
 3199  * 3. If step 2 finds no outstanding commands, the controller is working, so
 3200  *    skip OCR; otherwise do OCR or kill the adapter per disableOnlineCtrlReset.
 3201  * 4. At the start of OCR, return all SCSI commands that have a ccb_ptr to CAM.
 3202  * 5. After OCR, re-fire management commands and return the controller to the operational state.
 3203  */
 3204 int
 3205 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
 3206 {
 3207         int retval = SUCCESS, i, j, retry = 0;
 3208         u_int32_t host_diag, abs_state, status_reg, reset_adapter;
 3209         union ccb *ccb;
 3210         struct mrsas_mfi_cmd *mfi_cmd;
 3211         struct mrsas_mpt_cmd *mpt_cmd;
 3212         union mrsas_evt_class_locale class_locale;
 3213         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 3214 
 3215         if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
 3216                 device_printf(sc->mrsas_dev,
 3217                     "mrsas: Hardware critical error, returning FAIL.\n");
 3218                 return FAIL;
 3219         }
 3220         mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
 3221         sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
 3222         mrsas_disable_intr(sc);
 3223         msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
 3224             sc->mrsas_fw_fault_check_delay * hz);
 3225 
 3226         /* First try waiting for commands to complete */
 3227         if (mrsas_wait_for_outstanding(sc, reset_reason)) {
 3228                 mrsas_dprint(sc, MRSAS_OCR,
 3229                     "resetting adapter from %s.\n",
 3230                     __func__);
 3231                 /* Now return commands back to the CAM layer */
 3232                 mtx_unlock(&sc->sim_lock);
 3233                 for (i = 0; i < sc->max_fw_cmds; i++) {
 3234                         mpt_cmd = sc->mpt_cmd_list[i];
 3235 
 3236                         if (mpt_cmd->peer_cmd) {
 3237                                 mrsas_dprint(sc, MRSAS_OCR,
 3238                                     "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
 3239                                     i, mpt_cmd, mpt_cmd->peer_cmd);
 3240                         }
 3241 
 3242                         if (mpt_cmd->ccb_ptr) {
 3243                                 if (mpt_cmd->callout_owner) {
 3244                                         ccb = (union ccb *)(mpt_cmd->ccb_ptr);
 3245                                         ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
 3246                                         mrsas_cmd_done(sc, mpt_cmd);
 3247                                 } else {
 3248                                         mpt_cmd->ccb_ptr = NULL;
 3249                                         mrsas_release_mpt_cmd(mpt_cmd);
 3250                                 }
 3251                         }
 3252                 }
 3253 
 3254                 mrsas_atomic_set(&sc->fw_outstanding, 0);
 3255 
 3256                 mtx_lock(&sc->sim_lock);
 3257 
 3258                 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3259                     outbound_scratch_pad));
 3260                 abs_state = status_reg & MFI_STATE_MASK;
 3261                 reset_adapter = status_reg & MFI_RESET_ADAPTER;
 3262                 if (sc->disableOnlineCtrlReset ||
 3263                     (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
 3264                         /* Reset not supported, kill adapter */
 3265                         mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
 3266                         mrsas_kill_hba(sc);
 3267                         retval = FAIL;
 3268                         goto out;
 3269                 }
 3270                 /* Now try to reset the chip */
 3271                 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
 3272                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3273                             MPI2_WRSEQ_FLUSH_KEY_VALUE);
 3274                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3275                             MPI2_WRSEQ_1ST_KEY_VALUE);
 3276                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3277                             MPI2_WRSEQ_2ND_KEY_VALUE);
 3278                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3279                             MPI2_WRSEQ_3RD_KEY_VALUE);
 3280                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3281                             MPI2_WRSEQ_4TH_KEY_VALUE);
 3282                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3283                             MPI2_WRSEQ_5TH_KEY_VALUE);
 3284                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
 3285                             MPI2_WRSEQ_6TH_KEY_VALUE);
 3286 
 3287                         /* Check that the diag write enable (DRWE) bit is on */
 3288                         host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3289                             fusion_host_diag));
 3290                         retry = 0;
 3291                         while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
 3292                                 DELAY(100 * 1000);
 3293                                 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3294                                     fusion_host_diag));
 3295                                 if (retry++ == 100) {
 3296                                         mrsas_dprint(sc, MRSAS_OCR,
 3297                                             "Host diag unlock failed!\n");
 3298                                         break;
 3299                                 }
 3300                         }
 3301                         if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
 3302                                 continue;
 3303 
 3304                         /* Send chip reset command */
 3305                         mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
 3306                             host_diag | HOST_DIAG_RESET_ADAPTER);
 3307                         DELAY(3000 * 1000);
 3308 
 3309                         /* Make sure reset adapter bit is cleared */
 3310                         host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3311                             fusion_host_diag));
 3312                         retry = 0;
 3313                         while (host_diag & HOST_DIAG_RESET_ADAPTER) {
 3314                                 DELAY(100 * 1000);
 3315                                 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3316                                     fusion_host_diag));
 3317                                 if (retry++ == 1000) {
 3318                                         mrsas_dprint(sc, MRSAS_OCR,
 3319                                             "Diag reset adapter never cleared!\n");
 3320                                         break;
 3321                                 }
 3322                         }
 3323                         if (host_diag & HOST_DIAG_RESET_ADAPTER)
 3324                                 continue;
 3325 
 3326                         abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3327                             outbound_scratch_pad)) & MFI_STATE_MASK;
 3328                         retry = 0;
 3329 
 3330                         while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
 3331                                 DELAY(100 * 1000);
 3332                                 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3333                                     outbound_scratch_pad)) & MFI_STATE_MASK;
 3334                         }
 3335                         if (abs_state <= MFI_STATE_FW_INIT) {
 3336                                 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
 3337                                     " state = 0x%x\n", abs_state);
 3338                                 continue;
 3339                         }
 3340                         /* Wait for FW to become ready */
 3341                         if (mrsas_transition_to_ready(sc, 1)) {
 3342                                 mrsas_dprint(sc, MRSAS_OCR,
 3343                                     "mrsas: Failed to transition controller to ready.\n");
 3344                                 continue;
 3345                         }
 3346                         mrsas_reset_reply_desc(sc);
 3347                         if (mrsas_ioc_init(sc)) {
 3348                                 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
 3349                                 continue;
 3350                         }
 3351                         for (j = 0; j < sc->max_fw_cmds; j++) {
 3352                                 mpt_cmd = sc->mpt_cmd_list[j];
 3353                                 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
 3354                                         mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
 3355                                         /* If not an IOCTL then release the command else re-fire */
 3356                                         if (!mfi_cmd->sync_cmd) {
 3357                                                 mrsas_release_mfi_cmd(mfi_cmd);
 3358                                         } else {
 3359                                                 req_desc = mrsas_get_request_desc(sc,
 3360                                                     mfi_cmd->cmd_id.context.smid - 1);
 3361                                                 mrsas_dprint(sc, MRSAS_OCR,
 3362                                                     "Re-fire command DCMD opcode 0x%x index %d\n ",
 3363                                                     mfi_cmd->frame->dcmd.opcode, j);
 3364                                                 if (!req_desc)
 3365                                                         device_printf(sc->mrsas_dev, 
 3366                                                             "Cannot build MPT cmd.\n");
 3367                                                 else
 3368                                                         mrsas_fire_cmd(sc, req_desc->addr.u.low,
 3369                                                             req_desc->addr.u.high);
 3370                                         }
 3371                                 }
 3372                         }
 3373 
 3374                         /* Reset load balance info */
 3375                         memset(sc->load_balance_info, 0,
 3376                             sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
 3377 
 3378                         if (mrsas_get_ctrl_info(sc)) {
 3379                                 mrsas_kill_hba(sc);
 3380                                 retval = FAIL;
 3381                                 goto out;
 3382                         }
 3383                         if (!mrsas_get_map_info(sc))
 3384                                 mrsas_sync_map_info(sc);
 3385 
 3386                         megasas_setup_jbod_map(sc);
 3387 
 3388                         if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
 3389                                 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
 3390                                         memset(sc->streamDetectByLD[j], 0, sizeof(LD_STREAM_DETECT));
 3391                                         sc->streamDetectByLD[j]->mruBitMap = MR_STREAM_BITMAP;
 3392                                 }
 3393                         }
 3394 
 3395                         mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
 3396                         mrsas_enable_intr(sc);
 3397                         sc->adprecovery = MRSAS_HBA_OPERATIONAL;
 3398 
 3399                         /* Register AEN with FW for last sequence number */
 3400                         class_locale.members.reserved = 0;
 3401                         class_locale.members.locale = MR_EVT_LOCALE_ALL;
 3402                         class_locale.members.class = MR_EVT_CLASS_DEBUG;
 3403 
 3404                         mtx_unlock(&sc->sim_lock);
 3405                         if (mrsas_register_aen(sc, sc->last_seq_num,
 3406                             class_locale.word)) {
 3407                                 device_printf(sc->mrsas_dev,
 3408                                     "ERROR: AEN registration FAILED from OCR!!! "
 3409                                     "Further events from the controller cannot be notified. "
 3410                                     "Either there is some problem in the controller "
 3411                                     "or the controller does not support AEN.\n"
 3412                                     "Please contact the support team if the problem persists.\n");
 3413                         }
 3414                         mtx_lock(&sc->sim_lock);
 3415 
 3416                         /* Adapter reset completed successfully */
 3417                         device_printf(sc->mrsas_dev, "Reset successful\n");
 3418                         retval = SUCCESS;
 3419                         goto out;
 3420                 }
 3421                 /* Reset failed, kill the adapter */
 3422                 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
 3423                 mrsas_kill_hba(sc);
 3424                 retval = FAIL;
 3425         } else {
 3426                 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
 3427                 mrsas_enable_intr(sc);
 3428                 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
 3429         }
 3430 out:
 3431         mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
 3432         mrsas_dprint(sc, MRSAS_OCR,
 3433             "Reset Exit with %d.\n", retval);
 3434         return retval;
 3435 }
 3436 
 3437 /*
 3438  * mrsas_kill_hba:      Kill HBA when OCR is not supported
 3439  * input:                       Adapter Context.
 3440  *
 3441  * This function will kill HBA when OCR is not supported.
 3442  */
 3443 void
 3444 mrsas_kill_hba(struct mrsas_softc *sc)
 3445 {
 3446         sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
 3447         DELAY(1000 * 1000);
 3448         mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
 3449         mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
 3450             MFI_STOP_ADP);
 3451         /* Flush */
 3452         mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
 3453         mrsas_complete_outstanding_ioctls(sc);
 3454 }
 3455 
 3456 /*
 3457  * mrsas_complete_outstanding_ioctls:   Complete pending IOCTLs after kill_hba
 3458  * input:                               Adapter soft state
 3459  *
 3460  * Completes outstanding pass-through IOCTL commands after the HBA is killed.
 3461  */
 3462 void 
 3463 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
 3464 {
 3465         int i;
 3466         struct mrsas_mpt_cmd *cmd_mpt;
 3467         struct mrsas_mfi_cmd *cmd_mfi;
 3468         u_int32_t count, MSIxIndex;
 3469 
 3470         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 3471         for (i = 0; i < sc->max_fw_cmds; i++) {
 3472                 cmd_mpt = sc->mpt_cmd_list[i];
 3473 
 3474                 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
 3475                         cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
 3476                         if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
 3477                                 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
 3478                                         mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
 3479                                             cmd_mpt->io_request->RaidContext.raid_context.status);
 3480                         }
 3481                 }
 3482         }
 3483 }
 3484 
 3485 /*
 3486  * mrsas_wait_for_outstanding:  Wait for outstanding commands
 3487  * input:                                               Adapter Context.
 3488  *
 3489  * This function will wait for 180 seconds for outstanding commands to be
 3490  * completed.
 3491  */
 3492 int
 3493 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
 3494 {
 3495         int i, outstanding, retval = 0;
 3496         u_int32_t fw_state, count, MSIxIndex;
 3497 
 3498         for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
 3499                 if (sc->remove_in_progress) {
 3500                         mrsas_dprint(sc, MRSAS_OCR,
 3501                             "Driver remove or shutdown called.\n");
 3502                         retval = 1;
 3503                         goto out;
 3504                 }
 3505                 /* Check if firmware is in fault state */
 3506                 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
 3507                     outbound_scratch_pad)) & MFI_STATE_MASK;
 3508                 if (fw_state == MFI_STATE_FAULT) {
 3509                         mrsas_dprint(sc, MRSAS_OCR,
 3510                             "Found FW in FAULT state, will reset adapter.\n");
 3511                         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 3512                         mtx_unlock(&sc->sim_lock);
 3513                         for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
 3514                                 mrsas_complete_cmd(sc, MSIxIndex);
 3515                         mtx_lock(&sc->sim_lock);
 3516                         retval = 1;
 3517                         goto out;
 3518                 }
 3519                 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
 3520                         mrsas_dprint(sc, MRSAS_OCR,
 3521                             "DCMD IO TIMEOUT detected, will reset adapter.\n");
 3522                         retval = 1;
 3523                         goto out;
 3524                 }
 3525                 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
 3526                 if (!outstanding)
 3527                         goto out;
 3528 
 3529                 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
 3530                         mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
 3531                             "commands to complete\n", i, outstanding);
 3532                         count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
 3533                         mtx_unlock(&sc->sim_lock);
 3534                         for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
 3535                                 mrsas_complete_cmd(sc, MSIxIndex);
 3536                         mtx_lock(&sc->sim_lock);
 3537                 }
 3538                 DELAY(1000 * 1000);
 3539         }
 3540 
 3541         if (mrsas_atomic_read(&sc->fw_outstanding)) {
 3542                 mrsas_dprint(sc, MRSAS_OCR,
 3543                     " pending commands remain after waiting,"
 3544                     " will reset adapter.\n");
 3545                 retval = 1;
 3546         }
 3547 out:
 3548         return retval;
 3549 }
 3550 
 3551 /*
 3552  * mrsas_release_mfi_cmd:       Return a cmd to free command pool
 3553  * input:                                       Command packet for return to free cmd pool
 3554  *
 3555  * This function returns the MFI & MPT command to the command list.
 3556  */
 3557 void
 3558 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
 3559 {
 3560         struct mrsas_softc *sc = cmd_mfi->sc;
 3561         struct mrsas_mpt_cmd *cmd_mpt;
 3562 
 3563         mtx_lock(&sc->mfi_cmd_pool_lock);
 3564         /*
 3565          * Release the MPT command (if one was allocated) that is associated
 3566          * with the MFI command.
 3567          */
 3568         if (cmd_mfi->cmd_id.context.smid) {
 3569                 mtx_lock(&sc->mpt_cmd_pool_lock);
 3570                 /* Get the mpt cmd from mfi cmd frame's smid value */
 3571                 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
 3572                 cmd_mpt->flags = 0;
 3573                 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
 3574                 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
 3575                 mtx_unlock(&sc->mpt_cmd_pool_lock);
 3576         }
 3577         /* Release the mfi command */
 3578         cmd_mfi->ccb_ptr = NULL;
 3579         cmd_mfi->cmd_id.frame_count = 0;
 3580         TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
 3581         mtx_unlock(&sc->mfi_cmd_pool_lock);
 3582 
 3583         return;
 3584 }
 3585 
 3586 /*
 3587  * mrsas_get_ctrl_info:         Returns FW's controller structure
 3588  * input:                       Adapter soft state
 3589  *                              (result is copied into sc->ctrl_info)
 3590  *
 3591  * Issues an internal command (DCMD) to get the FW's controller structure. This
 3592  * information is mainly used to find out the maximum IO transfer per command
 3593  * supported by the FW.
 3594  */
 3595 static int
 3596 mrsas_get_ctrl_info(struct mrsas_softc *sc)
 3597 {
 3598         int retcode = 0;
 3599         u_int8_t do_ocr = 1;
 3600         struct mrsas_mfi_cmd *cmd;
 3601         struct mrsas_dcmd_frame *dcmd;
 3602 
 3603         cmd = mrsas_get_mfi_cmd(sc);
 3604 
 3605         if (!cmd) {
 3606                 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
 3607                 return -ENOMEM;
 3608         }
 3609         dcmd = &cmd->frame->dcmd;
 3610 
 3611         if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
 3612                 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
 3613                 mrsas_release_mfi_cmd(cmd);
 3614                 return -ENOMEM;
 3615         }
 3616         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 3617 
 3618         dcmd->cmd = MFI_CMD_DCMD;
 3619         dcmd->cmd_status = 0xFF;
 3620         dcmd->sge_count = 1;
 3621         dcmd->flags = MFI_FRAME_DIR_READ;
 3622         dcmd->timeout = 0;
 3623         dcmd->pad_0 = 0;
 3624         dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
 3625         dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
 3626         dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
 3627         dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));
 3628 
 3629         if (!sc->mask_interrupts)
 3630                 retcode = mrsas_issue_blocked_cmd(sc, cmd);
 3631         else
 3632                 retcode = mrsas_issue_polled(sc, cmd);
 3633 
 3634         if (retcode == ETIMEDOUT)
 3635                 goto dcmd_timeout;
 3636         else {
 3637                 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
 3638                 le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
 3639                 le32_to_cpus(&sc->ctrl_info->adapterOperations2);
 3640                 le32_to_cpus(&sc->ctrl_info->adapterOperations3);
 3641                 le16_to_cpus(&sc->ctrl_info->adapterOperations4);
 3642         }
 3643 
 3644         do_ocr = 0;
 3645         mrsas_update_ext_vd_details(sc);
 3646 
 3647         sc->use_seqnum_jbod_fp =
 3648             sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
 3649         sc->support_morethan256jbod =
 3650                 sc->ctrl_info->adapterOperations4.supportPdMapTargetId;
 3651 
 3652         sc->disableOnlineCtrlReset =
 3653             sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
 3654 
 3655 dcmd_timeout:
 3656         mrsas_free_ctlr_info_cmd(sc);
 3657 
 3658         if (do_ocr)
 3659                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 3660 
 3661         if (!sc->mask_interrupts)
 3662                 mrsas_release_mfi_cmd(cmd);
 3663 
 3664         return (retcode);
 3665 }
 3666 
 3667 /*
 3668  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
 3669  * mrsas_update_ext_vd_details:  Update details w.r.t. Extended VD
 3670  * input:                        sc - Controller's softc
 3671  */
 3672 static void 
 3673 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
 3674 {
 3675         u_int32_t ventura_map_sz = 0;
 3676         sc->max256vdSupport =
 3677                 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
 3678 
 3679         /* Below is an additional check to address future FW enhancements */
 3680         if (sc->ctrl_info->max_lds > 64)
 3681                 sc->max256vdSupport = 1;
 3682 
 3683         sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
 3684             * MRSAS_MAX_DEV_PER_CHANNEL;
 3685         sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
 3686             * MRSAS_MAX_DEV_PER_CHANNEL;
 3687         if (sc->max256vdSupport) {
 3688                 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
 3689                 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
 3690         } else {
 3691                 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
 3692                 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
 3693         }
 3694 
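        /*
         * Select the RAID map size: newer (Ventura-class) firmware reports
         * maxRaidMapSize in MR_MIN_MAP_SIZE units; otherwise choose between
         * the legacy and extended map layouts based on max256vdSupport.
         */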
 3695         if (sc->maxRaidMapSize) {
 3696                 ventura_map_sz = sc->maxRaidMapSize *
 3697                     MR_MIN_MAP_SIZE;
 3698                 sc->current_map_sz = ventura_map_sz;
 3699                 sc->max_map_sz = ventura_map_sz;
 3700         } else {
 3701                 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
 3702                     (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
 3703                 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
 3704                 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
 3705                 if (sc->max256vdSupport)
 3706                         sc->current_map_sz = sc->new_map_sz;
 3707                 else
 3708                         sc->current_map_sz = sc->old_map_sz;
 3709         }
 3710 
 3711         sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
 3712 #if VD_EXT_DEBUG
 3713         device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
 3714             sc->maxRaidMapSize);
 3715         device_printf(sc->mrsas_dev,
 3716             "new_map_sz = 0x%x, old_map_sz = 0x%x, "
 3717             "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
 3718             "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
 3719             sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
 3720             sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
 3721 #endif
 3722 }
 3723 
 3724 /*
 3725  * mrsas_alloc_ctlr_info_cmd:   Allocates memory for controller info command
 3726  * input:                                               Adapter soft state
 3727  *
 3728  * Allocates DMAable memory for the controller info internal command.
 3729  */
 3730 int
 3731 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
 3732 {
 3733         int ctlr_info_size;
 3734 
 3735         /* Allocate get controller info command */
 3736         ctlr_info_size = sizeof(struct mrsas_ctrl_info);
 3737         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 3738             1, 0,
 3739             BUS_SPACE_MAXADDR_32BIT,
 3740             BUS_SPACE_MAXADDR,
 3741             NULL, NULL,
 3742             ctlr_info_size,
 3743             1,
 3744             ctlr_info_size,
 3745             BUS_DMA_ALLOCNOW,
 3746             NULL, NULL,
 3747             &sc->ctlr_info_tag)) {
 3748                 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
 3749                 return (ENOMEM);
 3750         }
 3751         if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
 3752             BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
 3753                 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
 3754                 return (ENOMEM);
 3755         }
 3756         if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
 3757             sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
 3758             &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
 3759                 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
 3760                 return (ENOMEM);
 3761         }
 3762         memset(sc->ctlr_info_mem, 0, ctlr_info_size);
 3763         return (0);
 3764 }
 3765 
 3766 /*
 3767  * mrsas_free_ctlr_info_cmd:    Free memory for controller info command
 3768  * input:                                               Adapter soft state
 3769  *
 3770  * Deallocates memory of the get controller info cmd.
 3771  */
 3772 void
 3773 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
 3774 {
 3775         if (sc->ctlr_info_phys_addr)
 3776                 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
 3777         if (sc->ctlr_info_mem != NULL)
 3778                 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
 3779         if (sc->ctlr_info_tag != NULL)
 3780                 bus_dma_tag_destroy(sc->ctlr_info_tag);
 3781 }
 3782 
 3783 /*
 3784  * mrsas_issue_polled:  Issues a polling command
 3785  * inputs:                              Adapter soft state
 3786  *                                              Command packet to be issued
 3787  *
 3788  * This function is used to post internal commands to Firmware.  MFI requires
 3789  * the cmd_status to be set to 0xFF before posting.  The maximum wait time of
 3790  * the poll response timer is 180 seconds.
 3791  */
 3792 int
 3793 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 3794 {
 3795         struct mrsas_header *frame_hdr = &cmd->frame->hdr;
 3796         u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
 3797         int i, retcode = SUCCESS;
 3798 
 3799         frame_hdr->cmd_status = 0xFF;
 3800         frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 3801 
 3802         /* Issue the frame using inbound queue port */
 3803         if (mrsas_issue_dcmd(sc, cmd)) {
 3804                 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
 3805                 return (1);
 3806         }
 3807         /*
 3808          * Poll response timer to wait for Firmware response.  While this
 3809          * timer with the DELAY call could block CPU, the time interval for
 3810          * this is only 1 millisecond.
 3811          */
 3812         if (frame_hdr->cmd_status == 0xFF) {
 3813                 for (i = 0; i < (max_wait * 1000); i++) {
 3814                         if (frame_hdr->cmd_status == 0xFF)
 3815                                 DELAY(1000);
 3816                         else
 3817                                 break;
 3818                 }
 3819         }
 3820         if (frame_hdr->cmd_status == 0xFF) {
 3821                 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
 3822                     "seconds from %s\n", max_wait, __func__);
 3823                 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
 3824                     cmd->frame->dcmd.opcode);
 3825                 retcode = ETIMEDOUT;
 3826         }
 3827         return (retcode);
 3828 }
 3829 
 3830 /*
 3831  * mrsas_issue_dcmd:    Issues a MFI Pass thru cmd
 3832  * input:                               Adapter soft state mfi cmd pointer
 3833  *
 3834  * This function is called by mrsas_issue_blocked_cmd() and
 3835  * mrsas_issue_polled(), to build the MPT command and then fire the command
 3836  * to Firmware.
 3837  */
 3838 int
 3839 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 3840 {
 3841         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 3842 
 3843         req_desc = mrsas_build_mpt_cmd(sc, cmd);
 3844         if (!req_desc) {
 3845                 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
 3846                 return (1);
 3847         }
 3848         mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
 3849 
 3850         return (0);
 3851 }
 3852 
 3853 /*
 3854  * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
 3855  * input:                               Adapter soft state mfi cmd to build
 3856  *
 3857  * This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru
 3858  * command and prepares the MPT command to send to Firmware.
 3859  */
 3860 MRSAS_REQUEST_DESCRIPTOR_UNION *
 3861 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 3862 {
 3863         MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 3864         u_int16_t index;
 3865 
 3866         if (mrsas_build_mptmfi_passthru(sc, cmd)) {
 3867                 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
 3868                 return NULL;
 3869         }
 3870         index = cmd->cmd_id.context.smid;
 3871 
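        /*
         * SMIDs are 1-based while the request descriptor array is 0-based,
         * hence the index - 1 below.
         */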
 3872         req_desc = mrsas_get_request_desc(sc, index - 1);
 3873         if (!req_desc)
 3874                 return NULL;
 3875 
 3876         req_desc->addr.Words = 0;
 3877         req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 3878 
 3879         req_desc->SCSIIO.SMID = htole16(index);
 3880 
 3881         return (req_desc);
 3882 }
 3883 
 3884 /*
 3885  * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
 3886  * input:                                               Adapter soft state mfi cmd pointer
 3887  *
 3888  * The MPT command and the io_request are setup as a passthru command. The SGE
 3889  * chain address is set to frame_phys_addr of the MFI command.
 3890  */
 3891 u_int8_t
 3892 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
 3893 {
 3894         MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
 3895         PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
 3896         struct mrsas_mpt_cmd *mpt_cmd;
 3897         struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
 3898 
 3899         mpt_cmd = mrsas_get_mpt_cmd(sc);
 3900         if (!mpt_cmd)
 3901                 return (1);
 3902 
 3903         /* Save the smid. To be used for returning the cmd */
 3904         mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
 3905 
 3906         mpt_cmd->sync_cmd_idx = mfi_cmd->index;
 3907 
 3908         /*
 3909          * For cmds where the flag is set, store the flag and check on
 3910          * completion. For cmds with this flag, don't call
 3911          * mrsas_complete_cmd.
 3912          */
 3913 
 3914         if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
 3915                 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 3916 
 3917         io_req = mpt_cmd->io_request;
 3918 
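        /*
         * On Gen3/Ventura/Aero controllers, clear the flags of the last SGE
         * slot in the main message frame before chaining to the MFI frame.
         */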
 3919         if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
 3920                 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
 3921 
 3922                 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
 3923                 sgl_ptr_end->Flags = 0;
 3924         }
 3925         mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
 3926 
 3927         io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
 3928         io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
 3929         io_req->ChainOffset = sc->chain_offset_mfi_pthru;
 3930 
 3931         mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);
 3932 
 3933         mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 3934             MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 3935 
 3936         mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);
 3937 
 3938         return (0);
 3939 }
 3940 
 3941 /*
 3942  * mrsas_issue_blocked_cmd:     Synchronous wrapper around regular FW cmds
 3943  * input:                                       Adapter soft state Command to be issued
 3944  *
 3945  * This function waits on an event for the command to be returned from the ISR.
 3946  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
 3947  * internal and ioctl commands.
 3948  */
 3949 int
 3950 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 3951 {
 3952         u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
 3953         unsigned long total_time = 0;
 3954         int retcode = SUCCESS;
 3955 
 3956         /* Initialize cmd_status */
 3957         cmd->cmd_status = 0xFF;
 3958 
 3959         /* Build MPT-MFI command for issue to FW */
 3960         if (mrsas_issue_dcmd(sc, cmd)) {
 3961                 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
 3962                 return (1);
 3963         }
 3964         sc->chan = (void *)&cmd;
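        /*
         * The wait channel is the address of sc->chan itself;
         * mrsas_wakeup() issues wakeup_one() on the same address to end
         * the tsleep() below.
         */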
 3965 
 3966         while (1) {
 3967                 if (cmd->cmd_status == 0xFF) {
 3968                         tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
 3969                 } else
 3970                         break;
 3971 
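                /*
                 * total_time is only advanced for internal (non-IOCTL)
                 * commands; each tsleep() above lasts at most hz ticks
                 * (one second), so the limit below is in seconds.
                 */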
 3972                 if (!cmd->sync_cmd) {   /* cmd->sync will be set for an IOCTL
 3973                                          * command */
 3974                         total_time++;
 3975                         if (total_time >= max_wait) {
 3976                                 device_printf(sc->mrsas_dev,
 3977                                     "Internal command timed out after %d seconds.\n", max_wait);
 3978                                 retcode = 1;
 3979                                 break;
 3980                         }
 3981                 }
 3982         }
 3983         sc->chan = NULL;
 3984 
 3985         if (cmd->cmd_status == 0xFF) {
 3986                 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
 3987                     "seconds from %s\n", max_wait, __func__);
 3988                 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
 3989                     cmd->frame->dcmd.opcode);
 3990                 retcode = ETIMEDOUT;
 3991         }
 3992         return (retcode);
 3993 }
 3994 
 3995 /*
 3996  * mrsas_complete_mptmfi_passthru:      Completes a command
 3997  * input:       @sc:                                    Adapter soft state
 3998  *                      @cmd:                                   Command to be completed
 3999  *                      @status:                                cmd completion status
 4000  *
 4001  * This function is called from mrsas_complete_cmd() after an interrupt is
 4002  * received from Firmware, and io_request->Function is
 4003  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 4004  */
 4005 void
 4006 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
 4007     u_int8_t status)
 4008 {
 4009         struct mrsas_header *hdr = &cmd->frame->hdr;
 4010         u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
 4011 
 4012         /* Reset the retry counter for future re-tries */
 4013         cmd->retry_for_fw_reset = 0;
 4014 
 4015         if (cmd->ccb_ptr)
 4016                 cmd->ccb_ptr = NULL;
 4017 
 4018         switch (hdr->cmd) {
 4019         case MFI_CMD_INVALID:
 4020                 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
 4021                 break;
 4022         case MFI_CMD_PD_SCSI_IO:
 4023         case MFI_CMD_LD_SCSI_IO:
 4024                 /*
 4025                  * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
 4026                  * issued either through an IO path or an IOCTL path. If it
 4027                  * was via IOCTL, we will send it to internal completion.
 4028                  */
 4029                 if (cmd->sync_cmd) {
 4030                         cmd->sync_cmd = 0;
 4031                         mrsas_wakeup(sc, cmd);
 4032                         break;
 4033                 }
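                /* FALLTHROUGH for non-IOCTL SCSI IO completions */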
 4034         case MFI_CMD_SMP:
 4035         case MFI_CMD_STP:
 4036         case MFI_CMD_DCMD:
 4037                 /* Check for LD map update */
 4038                 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
 4039                     (cmd->frame->dcmd.mbox.b[1] == 1)) {
 4040                         sc->fast_path_io = 0;
 4041                         mtx_lock(&sc->raidmap_lock);
 4042                         sc->map_update_cmd = NULL;
 4043                         if (cmd_status != 0) {
 4044                                 if (cmd_status != MFI_STAT_NOT_FOUND)
 4045                                         device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
 4046                                 else {
 4047                                         mrsas_release_mfi_cmd(cmd);
 4048                                         mtx_unlock(&sc->raidmap_lock);
 4049                                         break;
 4050                                 }
 4051                         } else
 4052                                 sc->map_id++;
 4053                         mrsas_release_mfi_cmd(cmd);
 4054                         if (MR_ValidateMapInfo(sc))
 4055                                 sc->fast_path_io = 0;
 4056                         else
 4057                                 sc->fast_path_io = 1;
 4058                         mrsas_sync_map_info(sc);
 4059                         mtx_unlock(&sc->raidmap_lock);
 4060                         break;
 4061                 }
 4062                 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
 4063                     cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
 4064                         sc->mrsas_aen_triggered = 0;
 4065                 }
 4066                 /* FW has an updated PD sequence */
 4067                 if ((cmd->frame->dcmd.opcode ==
 4068                     MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
 4069                     (cmd->frame->dcmd.mbox.b[0] == 1)) {
 4070                         mtx_lock(&sc->raidmap_lock);
 4071                         sc->jbod_seq_cmd = NULL;
 4072                         mrsas_release_mfi_cmd(cmd);
 4073 
 4074                         if (cmd_status == MFI_STAT_OK) {
 4075                                 sc->pd_seq_map_id++;
 4076                                 /* Re-register a pd sync seq num cmd */
 4077                                 if (megasas_sync_pd_seq_num(sc, true))
 4078                                         sc->use_seqnum_jbod_fp = 0;
 4079                         } else {
 4080                                 sc->use_seqnum_jbod_fp = 0;
 4081                                 device_printf(sc->mrsas_dev,
 4082                                     "Jbod map sync failed, status=%x\n", cmd_status);
 4083                         }
 4084                         mtx_unlock(&sc->raidmap_lock);
 4085                         break;
 4086                 }
 4087                 /* See if got an event notification */
 4088                 if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
 4089                         mrsas_complete_aen(sc, cmd);
 4090                 else
 4091                         mrsas_wakeup(sc, cmd);
 4092                 break;
 4093         case MFI_CMD_ABORT:
 4094                 /* Command issued to abort another cmd return */
 4095                 mrsas_complete_abort(sc, cmd);
 4096                 break;
 4097         default:
 4098                 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
 4099                 break;
 4100         }
 4101 }
 4102 
 4103 /*
 4104  * mrsas_wakeup:        Completes an internal command
 4105  * input:                       Adapter soft state
 4106  *                                      Command to be completed
 4107  *
 4108  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 4109  * timer is started.  This function is called from
 4110  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 4111  * the thread waiting on the command.
 4112  */
 4113 void
 4114 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 4115 {
 4116         cmd->cmd_status = cmd->frame->io.cmd_status;
 4117 
 4118         if (cmd->cmd_status == 0xFF)
 4119                 cmd->cmd_status = 0;
 4120 
 4121         sc->chan = (void *)&cmd;
 4122         wakeup_one((void *)&sc->chan);
 4123         return;
 4124 }
 4125 
 4126 /*
 4127  * mrsas_shutdown_ctlr:         Instructs FW to shut down the controller
 4128  * input:                       Adapter soft state, Shutdown/Hibernate opcode
 4129  *
 4130  * This function issues a DCMD internal command to Firmware to initiate shutdown
 4131  * of the controller.
 4132  */
 4133 static void
 4134 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
 4135 {
 4136         struct mrsas_mfi_cmd *cmd;
 4137         struct mrsas_dcmd_frame *dcmd;
 4138 
 4139         if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
 4140                 return;
 4141 
 4142         cmd = mrsas_get_mfi_cmd(sc);
 4143         if (!cmd) {
 4144                 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
 4145                 return;
 4146         }
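        /*
         * Abort any outstanding pended DCMDs (AEN, RAID map update, JBOD
         * sequence-number sync) before asking the firmware to shut down.
         */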
 4147         if (sc->aen_cmd)
 4148                 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
 4149         if (sc->map_update_cmd)
 4150                 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
 4151         if (sc->jbod_seq_cmd)
 4152                 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
 4153 
 4154         dcmd = &cmd->frame->dcmd;
 4155         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4156 
 4157         dcmd->cmd = MFI_CMD_DCMD;
 4158         dcmd->cmd_status = 0x0;
 4159         dcmd->sge_count = 0;
 4160         dcmd->flags = MFI_FRAME_DIR_NONE;
 4161         dcmd->timeout = 0;
 4162         dcmd->pad_0 = 0;
 4163         dcmd->data_xfer_len = 0;
 4164         dcmd->opcode = opcode;
 4165 
 4166         device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
 4167 
 4168         mrsas_issue_blocked_cmd(sc, cmd);
 4169         mrsas_release_mfi_cmd(cmd);
 4170 
 4171         return;
 4172 }
 4173 
 4174 /*
 4175  * mrsas_flush_cache:           Requests FW to flush all its caches
 4176  * input:                       Adapter soft state
 4177  *
 4178  * This function issues a DCMD internal command to Firmware to initiate
 4179  * flushing of all caches.
 4180  */
 4181 static void
 4182 mrsas_flush_cache(struct mrsas_softc *sc)
 4183 {
 4184         struct mrsas_mfi_cmd *cmd;
 4185         struct mrsas_dcmd_frame *dcmd;
 4186 
 4187         if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
 4188                 return;
 4189 
 4190         cmd = mrsas_get_mfi_cmd(sc);
 4191         if (!cmd) {
 4192                 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
 4193                 return;
 4194         }
 4195         dcmd = &cmd->frame->dcmd;
 4196         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4197 
 4198         dcmd->cmd = MFI_CMD_DCMD;
 4199         dcmd->cmd_status = 0x0;
 4200         dcmd->sge_count = 0;
 4201         dcmd->flags = MFI_FRAME_DIR_NONE;
 4202         dcmd->timeout = 0;
 4203         dcmd->pad_0 = 0;
 4204         dcmd->data_xfer_len = 0;
 4205         dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
 4206         dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 4207 
 4208         mrsas_issue_blocked_cmd(sc, cmd);
 4209         mrsas_release_mfi_cmd(cmd);
 4210 
 4211         return;
 4212 }
 4213 
 4214 int
 4215 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
 4216 {
 4217         int retcode = 0;
 4218         u_int8_t do_ocr = 1;
 4219         struct mrsas_mfi_cmd *cmd;
 4220         struct mrsas_dcmd_frame *dcmd;
 4221         uint32_t pd_seq_map_sz;
 4222         struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
 4223         bus_addr_t pd_seq_h;
 4224 
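        /*
         * Size of the sync buffer: one MR_PD_CFG_SEQ_NUM_SYNC header plus
         * the remaining MAX_PHYSICAL_DEVICES - 1 MR_PD_CFG_SEQ entries
         * (the header presumably already embeds the first entry).
         */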
 4225         pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 4226             (sizeof(struct MR_PD_CFG_SEQ) *
 4227             (MAX_PHYSICAL_DEVICES - 1));
 4228 
 4229         cmd = mrsas_get_mfi_cmd(sc);
 4230         if (!cmd) {
 4231                 device_printf(sc->mrsas_dev,
 4232                     "Cannot alloc for ld map info cmd.\n");
 4233                 return 1;
 4234         }
 4235         dcmd = &cmd->frame->dcmd;
 4236 
 4237         pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
 4238         pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
 4239         if (!pd_sync) {
 4240                 device_printf(sc->mrsas_dev,
 4241                     "Failed to alloc mem for jbod map info.\n");
 4242                 mrsas_release_mfi_cmd(cmd);
 4243                 return (ENOMEM);
 4244         }
 4245         memset(pd_sync, 0, pd_seq_map_sz);
 4246         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4247         dcmd->cmd = MFI_CMD_DCMD;
 4248         dcmd->cmd_status = 0xFF;
 4249         dcmd->sge_count = 1;
 4250         dcmd->timeout = 0;
 4251         dcmd->pad_0 = 0;
 4252         dcmd->data_xfer_len = htole32(pd_seq_map_sz);
 4253         dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
 4254         dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
 4255         dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);
 4256 
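        /*
         * pend == true registers a pended DCMD that firmware completes when
         * the PD map changes (handled in mrsas_complete_mptmfi_passthru());
         * pend == false fetches the current map synchronously by polling.
         */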
 4257         if (pend) {
 4258                 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
 4259                 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
 4260                 sc->jbod_seq_cmd = cmd;
 4261                 if (mrsas_issue_dcmd(sc, cmd)) {
 4262                         device_printf(sc->mrsas_dev,
 4263                             "Fail to send sync map info command.\n");
 4264                         return 1;
 4265                 } else
 4266                         return 0;
 4267         } else
 4268                 dcmd->flags = htole16(MFI_FRAME_DIR_READ);
 4269 
 4270         retcode = mrsas_issue_polled(sc, cmd);
 4271         if (retcode == ETIMEDOUT)
 4272                 goto dcmd_timeout;
 4273 
 4274         if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
 4275                 device_printf(sc->mrsas_dev,
 4276                     "driver supports max %d JBOD, but FW reports %d\n",
 4277                     MAX_PHYSICAL_DEVICES, pd_sync->count);
 4278                 retcode = -EINVAL;
 4279         }
 4280         if (!retcode)
 4281                 sc->pd_seq_map_id++;
 4282         do_ocr = 0;
 4283 
 4284 dcmd_timeout:
 4285         if (do_ocr)
 4286                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 4287 
 4288         return (retcode);
 4289 }
 4290 
 4291 /*
 4292  * mrsas_get_map_info:          Load and validate RAID map
 4293  * input:                       Adapter instance soft state
 4294  *
 4295  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 4296  * and validate the RAID map.  It returns 0 if successful, 1 otherwise.
 4297  */
 4298 static int
 4299 mrsas_get_map_info(struct mrsas_softc *sc)
 4300 {
 4301         uint8_t retcode = 0;
 4302 
 4303         sc->fast_path_io = 0;
 4304         if (!mrsas_get_ld_map_info(sc)) {
 4305                 retcode = MR_ValidateMapInfo(sc);
 4306                 if (retcode == 0) {
 4307                         sc->fast_path_io = 1;
 4308                         return 0;
 4309                 }
 4310         }
 4311         return 1;
 4312 }
 4313 
 4314 /*
 4315  * mrsas_get_ld_map_info:       Get FW's ld_map structure
 4316  * input:                       Adapter instance soft state
 4317  *
 4318  * Issues an internal command (DCMD) to get the FW's LD map
 4319  * structure.
 4320  */
 4321 static int
 4322 mrsas_get_ld_map_info(struct mrsas_softc *sc)
 4323 {
 4324         int retcode = 0;
 4325         struct mrsas_mfi_cmd *cmd;
 4326         struct mrsas_dcmd_frame *dcmd;
 4327         void *map;
 4328         bus_addr_t map_phys_addr = 0;
 4329 
 4330         cmd = mrsas_get_mfi_cmd(sc);
 4331         if (!cmd) {
 4332                 device_printf(sc->mrsas_dev,
 4333                     "Cannot alloc for ld map info cmd.\n");
 4334                 return 1;
 4335         }
 4336         dcmd = &cmd->frame->dcmd;
 4337 
 4338         map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
 4339         map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
 4340         if (!map) {
 4341                 device_printf(sc->mrsas_dev,
 4342                     "Failed to alloc mem for ld map info.\n");
 4343                 mrsas_release_mfi_cmd(cmd);
 4344                 return (ENOMEM);
 4345         }
 4346         memset(map, 0, sc->max_map_sz);
 4347         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4348 
 4349         dcmd->cmd = MFI_CMD_DCMD;
 4350         dcmd->cmd_status = 0xFF;
 4351         dcmd->sge_count = 1;
 4352         dcmd->flags = htole16(MFI_FRAME_DIR_READ);
 4353         dcmd->timeout = 0;
 4354         dcmd->pad_0 = 0;
 4355         dcmd->data_xfer_len = htole32(sc->current_map_sz);
 4356         dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
 4357         dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
 4358         dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
 4359 
 4360         retcode = mrsas_issue_polled(sc, cmd);
 4361         if (retcode == ETIMEDOUT)
 4362                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 4363 
 4364         return (retcode);
 4365 }
 4366 
 4367 /*
 4368  * mrsas_sync_map_info:         Sync the LD target map with FW
 4369  * input:                       Adapter instance soft state
 4370  *
 4371  * Issues an internal command (DCMD) to write the driver's LD target IDs and
 4372  * sequence numbers back to Firmware.
 4373  */
 4374 static int
 4375 mrsas_sync_map_info(struct mrsas_softc *sc)
 4376 {
 4377         int retcode = 0, i;
 4378         struct mrsas_mfi_cmd *cmd;
 4379         struct mrsas_dcmd_frame *dcmd;
 4380         uint32_t num_lds;
 4381         MR_LD_TARGET_SYNC *target_map = NULL;
 4382         MR_DRV_RAID_MAP_ALL *map;
 4383         MR_LD_RAID *raid;
 4384         MR_LD_TARGET_SYNC *ld_sync;
 4385         bus_addr_t map_phys_addr = 0;
 4386 
 4387         cmd = mrsas_get_mfi_cmd(sc);
 4388         if (!cmd) {
 4389                 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
 4390                 return ENOMEM;
 4391         }
 4392         map = sc->ld_drv_map[sc->map_id & 1];
 4393         num_lds = map->raidMap.ldCount;
 4394 
 4395         dcmd = &cmd->frame->dcmd;
 4396         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4397 
 4398         target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
 4399         memset(target_map, 0, sc->max_map_sz);
 4400 
 4401         map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
 4402 
 4403         ld_sync = (MR_LD_TARGET_SYNC *) target_map;
 4404 
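        /*
         * Fill the spare raid-map buffer with one MR_LD_TARGET_SYNC entry
         * (target ID plus sequence number) per logical drive; this is what
         * is written back to firmware below.
         */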
 4405         for (i = 0; i < num_lds; i++, ld_sync++) {
 4406                 raid = MR_LdRaidGet(i, map);
 4407                 ld_sync->targetId = MR_GetLDTgtId(i, map);
 4408                 ld_sync->seqNum = raid->seqNum;
 4409         }
 4410 
 4411         dcmd->cmd = MFI_CMD_DCMD;
 4412         dcmd->cmd_status = 0xFF;
 4413         dcmd->sge_count = 1;
 4414         dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
 4415         dcmd->timeout = 0;
 4416         dcmd->pad_0 = 0;
 4417         dcmd->data_xfer_len = htole32(sc->current_map_sz);
 4418         dcmd->mbox.b[0] = num_lds;
 4419         dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
 4420         dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
 4421         dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
 4422         dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
 4423 
 4424         sc->map_update_cmd = cmd;
 4425         if (mrsas_issue_dcmd(sc, cmd)) {
 4426                 device_printf(sc->mrsas_dev,
 4427                     "Fail to send sync map info command.\n");
 4428                 return (1);
 4429         }
 4430         return (retcode);
 4431 }
 4432 
 4433 /*
 4434  * Input:       dcmd.opcode             - MR_DCMD_PD_GET_INFO
 4435  *              dcmd.mbox.s[0]          - deviceId for this physical drive
 4436  *              dcmd.sge IN             - ptr to returned MR_PD_INFO structure
 4437  * Desc:        Firmware returns the physical drive info structure
 4438  */
 4439 static void
 4440 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
 4441 {
 4442         int retcode;
 4443         u_int8_t do_ocr = 1;
 4444         struct mrsas_mfi_cmd *cmd;
 4445         struct mrsas_dcmd_frame *dcmd;
 4446 
 4447         cmd = mrsas_get_mfi_cmd(sc);
 4448 
 4449         if (!cmd) {
 4450                 device_printf(sc->mrsas_dev,
 4451                     "Cannot alloc for get PD info cmd\n");
 4452                 return;
 4453         }
 4454         dcmd = &cmd->frame->dcmd;
 4455 
 4456         memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
 4457         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4458 
 4459         dcmd->mbox.s[0] = htole16(device_id);
 4460         dcmd->cmd = MFI_CMD_DCMD;
 4461         dcmd->cmd_status = 0xFF;
 4462         dcmd->sge_count = 1;
 4463         dcmd->flags = MFI_FRAME_DIR_READ;
 4464         dcmd->timeout = 0;
 4465         dcmd->pad_0 = 0;
 4466         dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
 4467         dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
 4468         dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
 4469         dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
 4470 
 4471         if (!sc->mask_interrupts)
 4472                 retcode = mrsas_issue_blocked_cmd(sc, cmd);
 4473         else
 4474                 retcode = mrsas_issue_polled(sc, cmd);
 4475 
 4476         if (retcode == ETIMEDOUT)
 4477                 goto dcmd_timeout;
 4478 
 4479         sc->target_list[device_id].interface_type =
 4480                 le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
 4481 
 4482         do_ocr = 0;
 4483 
 4484 dcmd_timeout:
 4485 
 4486         if (do_ocr)
 4487                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 4488 
 4489         if (!sc->mask_interrupts)
 4490                 mrsas_release_mfi_cmd(cmd);
 4491 }
 4492 
 4493 /*
 4494  * mrsas_add_target:                            Add target ID of system PD/VD to driver's data structure.
 4495  * sc:                                          Adapter's soft state
 4496  * target_id:                                   Unique target ID per controller (managed by the driver)
 4497  *                                              for system PDs - target ID ranges from 0 to (MRSAS_MAX_PD - 1)
 4498  *                                              for VDs - target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 4499  * return:                                      void
 4500  * Description:                                 This function will be called whenever a system PD or VD is created.
 4501  */
 4502 static void mrsas_add_target(struct mrsas_softc *sc,
 4503         u_int16_t target_id)
 4504 {
 4505         sc->target_list[target_id].target_id = target_id;
 4506 
 4507         device_printf(sc->mrsas_dev,
 4508                 "%s created target ID: 0x%x\n",
 4509                 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
 4510                 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
 4511         /*
 4512          * If interrupts are enabled, then only fire DCMD to get pd_info
 4513          * for system PDs
 4514          */
 4515         if (!sc->mask_interrupts && sc->pd_info_mem &&
 4516                 (target_id < MRSAS_MAX_PD))
 4517                 mrsas_get_pd_info(sc, target_id);
 4518 
 4519 }
 4520 
 4521 /*
 4522  * mrsas_remove_target:                 Remove target ID of system PD/VD from driver's data structure.
 4523  * sc:                                          Adapter's soft state
 4524  * target_id:                                   Unique target ID per controller (managed by the driver)
 4525  *                                              for system PDs - target ID ranges from 0 to (MRSAS_MAX_PD - 1)
 4526  *                                              for VDs - target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 4527  * return:                                      void
 4528  * Description:                                 This function will be called whenever a system PD or VD is deleted.
 4529  */
 4530 static void mrsas_remove_target(struct mrsas_softc *sc,
 4531         u_int16_t target_id)
 4532 {
 4533         sc->target_list[target_id].target_id = 0xffff;
 4534         device_printf(sc->mrsas_dev,
 4535                 "%s deleted target ID: 0x%x\n",
 4536                 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
 4537                 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
 4538 }
 4539 
 4540 /*
 4541  * mrsas_get_pd_list:           Returns FW's PD list structure
 4542  * input:                       Adapter soft state
 4543  *
 4544  * Issues an internal command (DCMD) to get the FW's controller PD list
 4545  * structure.  This information is mainly used to find out which system
 4546  * physical drives are supported by the Firmware.
 4547  */
 4548 static int
 4549 mrsas_get_pd_list(struct mrsas_softc *sc)
 4550 {
 4551         int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
 4552         u_int8_t do_ocr = 1;
 4553         struct mrsas_mfi_cmd *cmd;
 4554         struct mrsas_dcmd_frame *dcmd;
 4555         struct MR_PD_LIST *pd_list_mem;
 4556         struct MR_PD_ADDRESS *pd_addr;
 4557         bus_addr_t pd_list_phys_addr = 0;
 4558         struct mrsas_tmp_dcmd *tcmd;
 4559         u_int16_t dev_id;
 4560 
 4561         cmd = mrsas_get_mfi_cmd(sc);
 4562         if (!cmd) {
 4563                 device_printf(sc->mrsas_dev,
 4564                     "Cannot alloc for get PD list cmd\n");
 4565                 return 1;
 4566         }
 4567         dcmd = &cmd->frame->dcmd;
 4568 
 4569         tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
 4570         pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
 4571         if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
 4572                 device_printf(sc->mrsas_dev,
 4573                     "Cannot alloc dmamap for get PD list cmd\n");
 4574                 mrsas_release_mfi_cmd(cmd);
 4575                 mrsas_free_tmp_dcmd(tcmd);
 4576                 free(tcmd, M_MRSAS);
 4577                 return (ENOMEM);
 4578         } else {
 4579                 pd_list_mem = tcmd->tmp_dcmd_mem;
 4580                 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
 4581         }
 4582         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4583 
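        /* Query only physical drives that are exposed to the host. */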
 4584         dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
 4585         dcmd->mbox.b[1] = 0;
 4586         dcmd->cmd = MFI_CMD_DCMD;
 4587         dcmd->cmd_status = 0xFF;
 4588         dcmd->sge_count = 1;
 4589         dcmd->flags = htole16(MFI_FRAME_DIR_READ);
 4590         dcmd->timeout = 0;
 4591         dcmd->pad_0 = 0;
 4592         dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
 4593         dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
 4594         dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
 4595         dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
 4596 
 4597         if (!sc->mask_interrupts)
 4598                 retcode = mrsas_issue_blocked_cmd(sc, cmd);
 4599         else
 4600                 retcode = mrsas_issue_polled(sc, cmd);
 4601 
 4602         if (retcode == ETIMEDOUT)
 4603                 goto dcmd_timeout;
 4604 
 4605         /* Get the instance PD list */
 4606         pd_count = MRSAS_MAX_PD;
 4607         pd_addr = pd_list_mem->addr;
 4608         if (le32toh(pd_list_mem->count) < pd_count) {
 4609                 memset(sc->local_pd_list, 0,
 4610                     MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
 4611                 for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
 4612                         dev_id = le16toh(pd_addr->deviceId);
 4613                         sc->local_pd_list[dev_id].tid = dev_id;
 4614                         sc->local_pd_list[dev_id].driveType =
 4615                             le16toh(pd_addr->scsiDevType);
 4616                         sc->local_pd_list[dev_id].driveState =
 4617                             MR_PD_STATE_SYSTEM;
 4618                         if (sc->target_list[dev_id].target_id == 0xffff)
 4619                                 mrsas_add_target(sc, dev_id);
 4620                         pd_addr++;
 4621                 }
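                /*
                 * Prune targets that the firmware no longer reports as
                 * system PDs.
                 */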
 4622                 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
 4623                         if ((sc->local_pd_list[pd_index].driveState !=
 4624                                 MR_PD_STATE_SYSTEM) &&
 4625                                 (sc->target_list[pd_index].target_id !=
 4626                                 0xffff)) {
 4627                                 mrsas_remove_target(sc, pd_index);
 4628                         }
 4629                 }
 4630                 /*
 4631                  * Use a mutex/spinlock if the pd_list component size increases beyond
 4632                  * 32 bits.
 4633                  */
 4634                 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
 4635                 do_ocr = 0;
 4636         }
 4637 dcmd_timeout:
 4638         mrsas_free_tmp_dcmd(tcmd);
 4639         free(tcmd, M_MRSAS);
 4640 
 4641         if (do_ocr)
 4642                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 4643 
 4644         if (!sc->mask_interrupts)
 4645                 mrsas_release_mfi_cmd(cmd);
 4646 
 4647         return (retcode);
 4648 }
 4649 
 4650 /*
 4651  * mrsas_get_ld_list:           Returns FW's LD list structure
 4652  * input:                       Adapter soft state
 4653  *
 4654  * Issues an internal command (DCMD) to get the FW's controller LD list
 4655  * structure.  This information is mainly used to find out which logical
 4656  * drives are supported by the FW.
 4657  */
 4658 static int
 4659 mrsas_get_ld_list(struct mrsas_softc *sc)
 4660 {
 4661         int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
 4662         u_int8_t do_ocr = 1;
 4663         struct mrsas_mfi_cmd *cmd;
 4664         struct mrsas_dcmd_frame *dcmd;
 4665         struct MR_LD_LIST *ld_list_mem;
 4666         bus_addr_t ld_list_phys_addr = 0;
 4667         struct mrsas_tmp_dcmd *tcmd;
 4668 
 4669         cmd = mrsas_get_mfi_cmd(sc);
 4670         if (!cmd) {
 4671                 device_printf(sc->mrsas_dev,
 4672                     "Cannot alloc for get LD list cmd\n");
 4673                 return 1;
 4674         }
 4675         dcmd = &cmd->frame->dcmd;
 4676 
 4677         tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
 4678         ld_list_size = sizeof(struct MR_LD_LIST);
 4679         if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
 4680                 device_printf(sc->mrsas_dev,
 4681                     "Cannot alloc dmamap for get LD list cmd\n");
 4682                 mrsas_release_mfi_cmd(cmd);
 4683                 mrsas_free_tmp_dcmd(tcmd);
 4684                 free(tcmd, M_MRSAS);
 4685                 return (ENOMEM);
 4686         } else {
 4687                 ld_list_mem = tcmd->tmp_dcmd_mem;
 4688                 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
 4689         }
 4690         memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 4691 
 4692         if (sc->max256vdSupport)
 4693                 dcmd->mbox.b[0] = 1;
 4694 
 4695         dcmd->cmd = MFI_CMD_DCMD;
 4696         dcmd->cmd_status = 0xFF;
 4697         dcmd->sge_count = 1;
 4698         dcmd->flags = MFI_FRAME_DIR_READ;
 4699         dcmd->timeout = 0;
 4700         dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
 4701         dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
 4702         dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
 4703         dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
 4704         dcmd->pad_0 = 0;
 4705 
 4706         if (!sc->mask_interrupts)
 4707                 retcode = mrsas_issue_blocked_cmd(sc, cmd);
 4708         else
 4709                 retcode = mrsas_issue_polled(sc, cmd);
 4710 
 4711         if (retcode == ETIMEDOUT)
 4712                 goto dcmd_timeout;
 4713 
 4714 #if VD_EXT_DEBUG
 4715         printf("Number of LDs %d\n", ld_list_mem->ldCount);
 4716 #endif
 4717 
 4718         /* Get the instance LD list */
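        /*
         * VDs are placed in the driver's target list at an offset of
         * MRSAS_MAX_PD; a non-zero LD state is treated as present.
         */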
 4719         if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
 4720                 sc->CurLdCount = le32toh(ld_list_mem->ldCount);
 4721                 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
 4722                 for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
 4723                         ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
 4724                         drv_tgt_id = ids + MRSAS_MAX_PD;
 4725                         if (ld_list_mem->ldList[ld_index].state != 0) {
 4726                                 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
 4727                                 if (sc->target_list[drv_tgt_id].target_id ==
 4728                                         0xffff)
 4729                                         mrsas_add_target(sc, drv_tgt_id);
 4730                         } else {
 4731                                 if (sc->target_list[drv_tgt_id].target_id !=
 4732                                         0xffff)
 4733                                         mrsas_remove_target(sc,
 4734                                                 drv_tgt_id);
 4735                         }
 4736                 }
 4737 
 4738                 do_ocr = 0;
 4739         }
 4740 dcmd_timeout:
 4741         mrsas_free_tmp_dcmd(tcmd);
 4742         free(tcmd, M_MRSAS);
 4743 
 4744         if (do_ocr)
 4745                 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 4746         if (!sc->mask_interrupts)
 4747                 mrsas_release_mfi_cmd(cmd);
 4748 
 4749         return (retcode);
 4750 }
 4751 
 4752 /*
 4753  * mrsas_alloc_tmp_dcmd:        Allocates memory for a temporary command
 4754  * input:                       Adapter soft state, temp command, size of allocation
 4755  *
 4756  * Allocates DMAable memory for a temporary internal command. The allocated
 4757  * memory is initialized to all zeros upon successful loading of the dma
 4758  * mapped memory.
 4759  */
 4760 int
 4761 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
 4762     struct mrsas_tmp_dcmd *tcmd, int size)
 4763 {
 4764         if (bus_dma_tag_create(sc->mrsas_parent_tag,
 4765             1, 0,
 4766             BUS_SPACE_MAXADDR_32BIT,
 4767             BUS_SPACE_MAXADDR,
 4768             NULL, NULL,
 4769             size,
 4770             1,
 4771             size,
 4772             BUS_DMA_ALLOCNOW,
 4773             NULL, NULL,
 4774             &tcmd->tmp_dcmd_tag)) {
 4775                 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
 4776                 return (ENOMEM);
 4777         }
 4778         if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
 4779             BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
 4780                 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
 4781                 return (ENOMEM);
 4782         }
 4783         if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
 4784             tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
 4785             &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
 4786                 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
 4787                 return (ENOMEM);
 4788         }
 4789         memset(tcmd->tmp_dcmd_mem, 0, size);
 4790         return (0);
 4791 }
 4792 
 4793 /*
 4794  * mrsas_free_tmp_dcmd:         Free memory for temporary command
 4795  * input:                       temporary dcmd pointer
 4796  *
 4797  * Deallocates memory of the temporary command for use in the construction of
 4798  * the internal DCMD.
 4799  */
 4800 void
 4801 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
 4802 {
 4803         if (tmp->tmp_dcmd_phys_addr)
 4804                 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
 4805         if (tmp->tmp_dcmd_mem != NULL)
 4806                 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
 4807         if (tmp->tmp_dcmd_tag != NULL)
 4808                 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
 4809 }
 4810 
 4811 /*
 4812  * mrsas_issue_blocked_abort_cmd:       Aborts a previously issued cmd
 4813  * input:                               Adapter soft state, previously issued cmd to be aborted
 4814  *
 4815  * This function is used to abort previously issued commands, such as AEN and
 4816  * RAID map sync map commands.  The abort command is sent as a DCMD internal
 4817  * command and subsequently the driver will wait for a return status.  The
 4818  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
 4819  */
 4820 static int
 4821 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
 4822     struct mrsas_mfi_cmd *cmd_to_abort)
 4823 {
 4824         struct mrsas_mfi_cmd *cmd;
 4825         struct mrsas_abort_frame *abort_fr;
 4826         u_int8_t retcode = 0;
 4827         unsigned long total_time = 0;
 4828         u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
 4829 
 4830         cmd = mrsas_get_mfi_cmd(sc);
 4831         if (!cmd) {
 4832                 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
 4833                 return (1);
 4834         }
 4835         abort_fr = &cmd->frame->abort;
 4836 
 4837         /* Prepare and issue the abort frame */
 4838         abort_fr->cmd = MFI_CMD_ABORT;
 4839         abort_fr->cmd_status = 0xFF;
 4840         abort_fr->flags = 0;
 4841         abort_fr->abort_context = cmd_to_abort->index;
 4842         abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
 4843         abort_fr->abort_mfi_phys_addr_hi = 0;
 4844 
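        /*
         * Mark the abort as a synchronous command so that the completion
         * path (mrsas_complete_abort()) wakes the sleeping thread below.
         */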
 4845         cmd->sync_cmd = 1;
 4846         cmd->cmd_status = 0xFF;
 4847 
 4848         if (mrsas_issue_dcmd(sc, cmd)) {
 4849                 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
 4850                 return (1);
 4851         }
 4852         /* Wait for this cmd to complete */
 4853         sc->chan = (void *)&cmd;
 4854         while (1) {
 4855                 if (cmd->cmd_status == 0xFF) {
 4856                         tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
 4857                 } else
 4858                         break;
 4859                 total_time++;
 4860                 if (total_time >= max_wait) {
 4861                         device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
 4862                         retcode = 1;
 4863                         break;
 4864                 }
 4865         }
 4866 
 4867         cmd->sync_cmd = 0;
 4868         mrsas_release_mfi_cmd(cmd);
 4869         return (retcode);
 4870 }
 4871 
 4872 /*
 4873  * mrsas_complete_abort:        Completes aborting a command
 4874  * input:                       Adapter soft state, cmd that was issued to abort another cmd
 4875  *
 4876  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
 4877  * change after sending the command.  This function is called from
 4878  * mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
 4879  */
 4880 void
 4881 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 4882 {
 4883         if (cmd->sync_cmd) {
 4884                 cmd->sync_cmd = 0;
 4885                 cmd->cmd_status = 0;
 4886                 sc->chan = (void *)&cmd;
 4887                 wakeup_one((void *)&sc->chan);
 4888         }
 4889         return;
 4890 }
 4891 
 4892 /*
 4893  * mrsas_aen_handler:   AEN processing callback function from thread context
 4894  * input:                               Adapter soft state
 4895  *
 4896  * Asynchronous event handler
 4897  */
 4898 void
 4899 mrsas_aen_handler(struct mrsas_softc *sc)
 4900 {
 4901         union mrsas_evt_class_locale class_locale;
 4902         int doscan = 0;
 4903         u_int32_t seq_num;
 4904         int error, fail_aen = 0;
 4905 
 4906         if (sc == NULL) {
 4907                 printf("invalid instance!\n");
 4908                 return;
 4909         }
 4910         if (sc->remove_in_progress || sc->reset_in_progress) {
 4911                 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
 4912                         __func__, __LINE__);
 4913                 return;
 4914         }
 4915         if (sc->evt_detail_mem) {
 4916                 switch (sc->evt_detail_mem->code) {
 4917                 case MR_EVT_PD_INSERTED:
 4918                         fail_aen = mrsas_get_pd_list(sc);
 4919                         if (!fail_aen)
 4920                                 mrsas_bus_scan_sim(sc, sc->sim_1);
 4921                         else
 4922                                 goto skip_register_aen;
 4923                         break;
 4924                 case MR_EVT_PD_REMOVED:
 4925                         fail_aen = mrsas_get_pd_list(sc);
 4926                         if (!fail_aen)
 4927                                 mrsas_bus_scan_sim(sc, sc->sim_1);
 4928                         else
 4929                                 goto skip_register_aen;
 4930                         break;
 4931                 case MR_EVT_LD_OFFLINE:
 4932                 case MR_EVT_CFG_CLEARED:
 4933                 case MR_EVT_LD_DELETED:
 4934                         mrsas_bus_scan_sim(sc, sc->sim_0);
 4935                         break;
 4936                 case MR_EVT_LD_CREATED:
 4937                         fail_aen = mrsas_get_ld_list(sc);
 4938                         if (!fail_aen)
 4939                                 mrsas_bus_scan_sim(sc, sc->sim_0);
 4940                         else
 4941                                 goto skip_register_aen;
 4942                         break;
 4943                 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
 4944                 case MR_EVT_FOREIGN_CFG_IMPORTED:
 4945                 case MR_EVT_LD_STATE_CHANGE:
 4946                         doscan = 1;
 4947                         break;
 4948                 case MR_EVT_CTRL_PROP_CHANGED:
 4949                         fail_aen = mrsas_get_ctrl_info(sc);
 4950                         if (fail_aen)
 4951                                 goto skip_register_aen;
 4952                         break;
 4953                 default:
 4954                         break;
 4955                 }
 4956         } else {
 4957                 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
 4958                 return;
 4959         }
 4960         if (doscan) {
 4961                 fail_aen = mrsas_get_pd_list(sc);
 4962                 if (!fail_aen) {
 4963                         mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
 4964                         mrsas_bus_scan_sim(sc, sc->sim_1);
 4965                 } else
 4966                         goto skip_register_aen;
 4967 
 4968                 fail_aen = mrsas_get_ld_list(sc);
 4969                 if (!fail_aen) {
 4970                         mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
 4971                         mrsas_bus_scan_sim(sc, sc->sim_0);
 4972                 } else
 4973                         goto skip_register_aen;
 4974         }
 4975         seq_num = sc->evt_detail_mem->seq_num + 1;
 4976 
 4977         /* Register AEN with FW for latest sequence number plus 1 */
 4978         class_locale.members.reserved = 0;
 4979         class_locale.members.locale = MR_EVT_LOCALE_ALL;
 4980         class_locale.members.class = MR_EVT_CLASS_DEBUG;
 4981 
 4982         if (sc->aen_cmd != NULL)
 4983                 return;
 4984 
 4985         mtx_lock(&sc->aen_lock);
 4986         error = mrsas_register_aen(sc, seq_num,
 4987             class_locale.word);
 4988         mtx_unlock(&sc->aen_lock);
 4989 
 4990         if (error)
 4991                 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
 4992 
 4993 skip_register_aen:
 4994         return;
 4995 
 4996 }
 4997 
 4998 /*
 4999  * mrsas_complete_aen:  Completes AEN command
 5000  * input:                               Adapter soft state
 5001  *                                              Cmd that was issued to abort another cmd
 5002  *
 5003  * This function will be called from ISR and will continue event processing from
 5004  * thread context by enqueuing task in ev_tq (callback function
 5005  * "mrsas_aen_handler").
 5006  */
 5007 void
 5008 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
 5009 {
 5010         /*
 5011          * Don't signal app if it is just an aborted previously registered
 5012          * aen
 5013          */
 5014         if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
 5015                 sc->mrsas_aen_triggered = 1;
 5016                 mtx_lock(&sc->aen_lock);
 5017                 if (sc->mrsas_poll_waiting) {
 5018                         sc->mrsas_poll_waiting = 0;
 5019                         selwakeup(&sc->mrsas_select);
 5020                 }
 5021                 mtx_unlock(&sc->aen_lock);
 5022         } else
 5023                 cmd->abort_aen = 0;
 5024 
 5025         sc->aen_cmd = NULL;
 5026         mrsas_release_mfi_cmd(cmd);
 5027 
 5028         taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
 5029 
 5030         return;
 5031 }
 5032 
 5033 static device_method_t mrsas_methods[] = {
 5034         DEVMETHOD(device_probe, mrsas_probe),
 5035         DEVMETHOD(device_attach, mrsas_attach),
 5036         DEVMETHOD(device_detach, mrsas_detach),
 5037         DEVMETHOD(device_shutdown, mrsas_shutdown),
 5038         DEVMETHOD(device_suspend, mrsas_suspend),
 5039         DEVMETHOD(device_resume, mrsas_resume),
 5040         DEVMETHOD(bus_print_child, bus_generic_print_child),
 5041         DEVMETHOD(bus_driver_added, bus_generic_driver_added),
 5042         {0, 0}
 5043 };
 5044 
 5045 static driver_t mrsas_driver = {
 5046         "mrsas",
 5047         mrsas_methods,
 5048         sizeof(struct mrsas_softc)
 5049 };
 5050 
 5051 DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
 5052 MODULE_DEPEND(mrsas, cam, 1, 1, 1);
