FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi_tbolt.c


    1  /*-
    2  * Redistribution and use in source and binary forms, with or without
    3  * modification, are permitted provided that the following conditions
    4  * are met:
    5  *
    6  *            Copyright 1994-2009 The FreeBSD Project.
    7  *            All rights reserved.
    8  *
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   17  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   18  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
   19  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   20  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
   23  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   26  *
   27  * The views and conclusions contained in the software and documentation
   28  * are those of the authors and should not be interpreted as representing
   29  * official policies, either expressed or implied, of the FreeBSD Project.
   30  */
   31 
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 #include "opt_mfi.h"
   37 
   38 #include <sys/param.h>
   39 #include <sys/types.h>
   40 #include <sys/kernel.h>
   41 #include <sys/selinfo.h>
   42 #include <sys/bus.h>
   43 #include <sys/conf.h>
   44 #include <sys/bio.h>
   45 #include <sys/ioccom.h>
   46 #include <sys/eventhandler.h>
   47 #include <sys/callout.h>
   48 #include <sys/uio.h>
   49 #include <machine/bus.h>
   50 #include <sys/sysctl.h>
   51 #include <sys/systm.h>
   52 #include <sys/malloc.h>
   53 
   54 #include <dev/mfi/mfireg.h>
   55 #include <dev/mfi/mfi_ioctl.h>
   56 #include <dev/mfi/mfivar.h>
   57 
   58 struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
   59 union mfi_mpi2_request_descriptor *
   60 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
   61 void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
   62 int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
   63     struct mfi_cmd_tbolt *cmd);
   64 static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
   65     struct mfi_cmd_tbolt *cmd);
   66 union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
   67     *sc, struct mfi_command *cmd);
   68 uint8_t
   69 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
   70 union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
   71     *sc, struct mfi_command *mfi_cmd);
   72 int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
   73 void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
   74     struct mfi_cmd_tbolt *cmd);
   75 static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
   76     *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
   77 static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
   78     *mfi_cmd, uint8_t *cdb);
   79 void
   80 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
   81      uint8_t ext_status);
   82 static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
   83 static void mfi_kill_hba (struct mfi_softc *sc);
   84 static void mfi_process_fw_state_chg_isr(void *arg);
   85 static void mfi_sync_map_complete(struct mfi_command *);
   86 static void mfi_queue_map_sync(struct mfi_softc *sc);
   87 
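       /*
        * This is the only interrupt source the driver leaves unmasked in
        * MFI_OMSK; Fusion reply completions are signalled through this bit.
        */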
   88 #define MFI_FUSION_ENABLE_INTERRUPT_MASK        (0x00000008)
   89 
   90 void
   91 mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
   92 {
   93         MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
   94         MFI_READ4(sc, MFI_OMSK);
   95 }
   96 
   97 void
   98 mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
   99 {
  100         MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
  101         MFI_READ4(sc, MFI_OMSK);
  102 }
  103 
  104 int32_t
  105 mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
  106 {
  107         return MFI_READ4(sc, MFI_OSP0);
  108 }
  109 
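       /*
        * Check and acknowledge the outbound interrupt status.  Returns 1 if
        * the interrupt was not ours, 0 if a reply interrupt was claimed, or a
        * value with MFI_FIRMWARE_STATE_CHANGE set if the firmware signalled a
        * state change.
        */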
  110 int32_t
  111 mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
  112 {
  113         int32_t status, mfi_status = 0;
  114 
  115         status = MFI_READ4(sc, MFI_OSTS);
  116 
  117         if (status & 1) {
  118                 MFI_WRITE4(sc, MFI_OSTS, status);
  119                 MFI_READ4(sc, MFI_OSTS);
  120                 if (status & MFI_STATE_CHANGE_INTERRUPT) {
  121                         mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
  122                 }
  123 
  124                 return mfi_status;
  125         }
  126         if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
  127                 return 1;
  128 
  129         MFI_READ4(sc, MFI_OSTS);
  130         return 0;
  131 }
  132 
  133 
  134 void
  135 mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
  136    uint32_t frame_cnt)
  137 {
  138         bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
  139             << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  140         MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
  141         MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
  142 }
  143 
  144 /*
  145  * mfi_tbolt_adp_reset - For controller reset
  146  * @regs: MFI register set
  147  */
  148 int
  149 mfi_tbolt_adp_reset(struct mfi_softc *sc)
  150 {
  151         int retry = 0, i = 0;
  152         int HostDiag;
  153 
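               /*
                * Write the key sequence to the write-sequence register to
                * unlock the host diagnostic register; the loop below waits
                * for DIAG_WRITE_ENABLE to be reflected back.
                */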
  154         MFI_WRITE4(sc, MFI_WSR, 0xF);
  155         MFI_WRITE4(sc, MFI_WSR, 4);
  156         MFI_WRITE4(sc, MFI_WSR, 0xB);
  157         MFI_WRITE4(sc, MFI_WSR, 2);
  158         MFI_WRITE4(sc, MFI_WSR, 7);
  159         MFI_WRITE4(sc, MFI_WSR, 0xD);
  160 
  161         for (i = 0; i < 10000; i++) ;
  162 
  163         HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
  164 
  165         while (!( HostDiag & DIAG_WRITE_ENABLE)) {
  166                 for (i = 0; i < 1000; i++);
  167                 HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
  168                 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
  169                     "hostdiag=%x\n", retry, HostDiag);
  170 
  171                 if (retry++ >= 100)
  172                         return 1;
  173         }
  174 
  175         device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
  176 
  177         MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
  178 
  179         for (i=0; i < 10; i++) {
  180                 for (i = 0; i < 10000; i++);
  181         }
  182 
  183         HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
  184         while (HostDiag & DIAG_RESET_ADAPTER) {
  185                 for (i = 0; i < 1000; i++) ;
  186                 HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
  187                 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
  188                     "hostdiag=%x\n", retry, HostDiag);
  189 
  190                 if (retry++ >= 1000)
  191                         return 1;
  192         }
  193         return 0;
  194 }
  195 
  196 /*
  197  * This routine initializes Thunderbolt-specific device information.
  198  */
  199 void
  200 mfi_tbolt_init_globals(struct mfi_softc *sc)
  201 {
  202         /* Initialize single reply size and Message size */
  203         sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
  204         sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
  205 
  206         /*
   207          * Calculate how many SGEs fit in an allocated main message:
   208          * (size of the message - RAID SCSI IO message size (excluding SGE))
   209          * / size of an SGE;
   210          * (0x100 - (0x90 - 0x10)) / 0x10 = 8
  211          */
  212         sc->max_SGEs_in_main_message =
  213             (uint8_t)((sc->raid_io_msg_size
  214             - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
  215             - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
  216         /*
   217          * (Command frame size allocated in SRB ext - RAID SCSI IO message size)
   218          * / size of an SGL;
  219          * (1280 - 256) / 16 = 64
  220          */
  221         sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
  222             - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
  223         /*
   224          * (0x08 - 1) + 0x40 - 1 = 0x46; one SGE is left for command
   225          * coalescing
   226          */
  227         sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
  228             + sc->max_SGEs_in_chain_message - 1;
   229         /*
   230          * This is the offset to the next chain, in units of 16 bytes
   231          * (4 * 32-bit words): (0x100 - 0x10) / 0x10 = 0xF (15)
   232          */
  233         sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
  234             - sizeof(MPI2_SGE_IO_UNION))/16;
  235         sc->chain_offset_value_for_mpt_ptmsg
  236             = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
  237         sc->mfi_cmd_pool_tbolt = NULL;
  238         sc->request_desc_pool = NULL;
  239 }
  240 
  241 /*
   242  * This function calculates the memory requirement for the Thunderbolt
   243  * controller and returns the total required memory in bytes.
  244  */
  245 
  246 uint32_t
  247 mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
  248 {
  249         uint32_t size;
  250         size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;      /* for Alignment */
  251         size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
  252         size += sc->reply_size * sc->mfi_max_fw_cmds;
  253         /* this is for SGL's */
  254         size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
  255         return size;
  256 }
  257 
  258 /*
  259  * Description:
  260  *      This function will prepare message pools for the Thunderbolt controller
  261  * Arguments:
   262  *      sc - adapter soft state
   263  *      mem_location - start of the memory allocated for Thunderbolt
   264  *      tbolt_contg_length - length of that contiguous allocation
   265  * Return Value:
   266  *      0 on success
  267  */
  268 int
  269 mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
  270     uint32_t tbolt_contg_length)
  271 {
  272         uint32_t     offset = 0;
  273         uint8_t      *addr = mem_location;
  274 
  275         /* Request Descriptor Base physical Address */
  276 
   277         /* For Request Descriptors Virtual Memory */
   278         /* Initialize the aligned IO Frames Virtual Memory Pointer */
  279         if (((uintptr_t)addr) & (0xFF)) {
  280                 addr = &addr[sc->raid_io_msg_size];
  281                 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
  282                 sc->request_message_pool_align = addr;
  283         } else
  284                 sc->request_message_pool_align = addr;
  285 
  286         offset = sc->request_message_pool_align - sc->request_message_pool;
  287         sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
  288 
  289         /* DJA XXX should this be bus dma ??? */
  290         /* Skip request message pool */
  291         addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
  292         /* Reply Frame Pool is initialized */
  293         sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
  294         if (((uintptr_t)addr) & (0xFF)) {
  295                 addr = &addr[sc->reply_size];
  296                 addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
  297         }
  298         sc->reply_frame_pool_align
  299                     = (struct mfi_mpi2_reply_header *)addr;
  300 
  301         offset = (uintptr_t)sc->reply_frame_pool_align
  302             - (uintptr_t)sc->request_message_pool;
  303         sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
  304 
  305         /* Skip Reply Frame Pool */
  306         addr += sc->reply_size * sc->mfi_max_fw_cmds;
  307         sc->reply_pool_limit = addr;
  308 
   309         /* Initialize all reply descriptors to the unused pattern (all 0xFF) */
  310         memset((uint8_t *)sc->reply_frame_pool, 0xFF,
  311                (sc->reply_size * sc->mfi_max_fw_cmds));
  312 
  313         offset = sc->reply_size * sc->mfi_max_fw_cmds;
  314         sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
  315         /* initialize the last_reply_idx to 0 */
  316         sc->last_reply_idx = 0;
  317         offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
  318             sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
  319         if (offset > tbolt_contg_length)
  320                 device_printf(sc->mfi_dev, "Error:Initialized more than "
  321                     "allocated\n");
  322         return 0;
  323 }
  324 
  325 /*
   326  * This routine prepares and issues the INIT2 frame to the firmware.
  327  */
  328 
  329 int
  330 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
  331 {
  332         struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
  333         struct mfi_init_frame   *mfi_init;
  334         uintptr_t                       offset = 0;
  335         bus_addr_t                      phyAddress;
  336         MFI_ADDRESS                     *mfiAddressTemp;
  337         struct mfi_command *cm;
  338         int error;
  339 
  340         mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
  341         /* Check if initialization is already completed */
  342         if (sc->MFA_enabled) {
  343                 return 1;
  344         }
  345 
  346         mtx_lock(&sc->mfi_io_lock);
  347         if ((cm = mfi_dequeue_free(sc)) == NULL) {
  348                 mtx_unlock(&sc->mfi_io_lock);
  349                 return (EBUSY);
  350         }
  351         cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
  352         cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
  353         cm->cm_dmamap = sc->mfi_tb_init_dmamap;
  354         cm->cm_frame->header.context = 0;
  355         cm->cm_sc = sc;
  356         cm->cm_index = 0;
  357 
  358         /*
  359          * Abuse the SG list area of the frame to hold the init_qinfo
  360          * object;
  361          */
  362         mfi_init = &cm->cm_frame->init;
  363 
  364         bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
  365         mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
  366         mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;
  367 
  368         /* set MsgVersion and HeaderVersion host driver was built with */
  369         mpi2IocInit->MsgVersion = MPI2_VERSION;
  370         mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
  371         mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
  372         mpi2IocInit->ReplyDescriptorPostQueueDepth
  373             = (uint16_t)sc->mfi_max_fw_cmds;
  374         mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
  375 
  376         /* Get physical address of reply frame pool */
  377         offset = (uintptr_t) sc->reply_frame_pool_align
  378             - (uintptr_t)sc->request_message_pool;
  379         phyAddress = sc->mfi_tb_busaddr + offset;
  380         mfiAddressTemp =
  381             (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
  382         mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
  383         mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
  384 
  385         /* Get physical address of request message pool */
  386         offset = sc->request_message_pool_align - sc->request_message_pool;
  387         phyAddress =  sc->mfi_tb_busaddr + offset;
  388         mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
  389         mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
  390         mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
  391         mpi2IocInit->ReplyFreeQueueAddress =  0; /* Not supported by MR. */
  392         mpi2IocInit->TimeStamp = time_uptime;
  393 
  394         if (sc->verbuf) {
  395                 snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
  396                 MEGASAS_VERSION);
  397                 mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
  398                 mfi_init->driver_ver_hi =
  399                     (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
  400         }
  401         /* Get the physical address of the mpi2 ioc init command */
  402         phyAddress =  sc->mfi_tb_ioc_init_busaddr;
  403         mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
  404         mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
  405         mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
  406 
  407         mfi_init->header.cmd = MFI_CMD_INIT;
  408         mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
  409         mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
  410 
  411         cm->cm_data = NULL;
  412         cm->cm_flags |= MFI_CMD_POLLED;
  413         cm->cm_timestamp = time_uptime;
  414         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  415                 device_printf(sc->mfi_dev, "failed to send IOC init2 "
  416                     "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
  417                 mfi_release_command(cm);
  418                 mtx_unlock(&sc->mfi_io_lock);
  419                 return (error);
  420         }
  421         mfi_release_command(cm);
  422         mtx_unlock(&sc->mfi_io_lock);
  423 
  424         if (mfi_init->header.cmd_status == 0) {
  425                 sc->MFA_enabled = 1;
  426         }
  427         else {
  428                 device_printf(sc->mfi_dev, "Init command Failed %x\n",
  429                     mfi_init->header.cmd_status);
  430                 return 1;
  431         }
  432 
  433         return 0;
  434 
  435 }
  436 
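       /*
        * Allocate the Thunderbolt command pool: a request descriptor array
        * plus one mfi_cmd_tbolt per firmware command, each wired to its slice
        * of the preallocated message and chain-frame memory.
        */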
  437 int
  438 mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
  439 {
  440         struct mfi_cmd_tbolt *cmd;
  441         bus_addr_t io_req_base_phys;
  442         uint8_t *io_req_base;
  443         int i = 0, j = 0, offset = 0;
  444 
  445         /*
  446          * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
  447          * Allocate the dynamic array first and then allocate individual
  448          * commands.
  449          */
  450         sc->request_desc_pool = malloc(sizeof(
  451             union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
  452             M_MFIBUF, M_NOWAIT|M_ZERO);
  453         sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
  454             * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
  455 
  456         if (!sc->mfi_cmd_pool_tbolt) {
  457                 device_printf(sc->mfi_dev, "out of memory. Could not alloc "
  458                     "memory for cmd_list_fusion\n");
  459                 return 1;
  460         }
  461 
  462         for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
  463                 sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
  464                     struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
  465 
   466                 if (!sc->mfi_cmd_pool_tbolt[i]) {
   467                         device_printf(sc->mfi_dev, "Could not alloc cmd list "
   468                             "fusion\n");
   469 
   470                         for (j = 0; j < i; j++)
   471                                 free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
   472 
   473                         free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
   474                         sc->mfi_cmd_pool_tbolt = NULL;
                               /* Bail out rather than dereferencing the freed pool below. */
                               return 1;
   475                 }
  476         }
  477 
  478         /*
   479          * The first 256 bytes (SMID 0) are not used.  Don't add them to the
   480          * cmd list.
  481          */
  482         io_req_base = sc->request_message_pool_align
  483                 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
  484         io_req_base_phys = sc->request_msg_busaddr
  485                 + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
  486 
  487         /*
   488          * Add all the commands to the command pool (sc->mfi_cmd_tbolt_tqh)
  489          */
  490         /* SMID 0 is reserved. Set SMID/index from 1 */
  491 
  492         for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
  493                 cmd = sc->mfi_cmd_pool_tbolt[i];
  494                 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
  495                 cmd->index = i + 1;
  496                 cmd->request_desc = (union mfi_mpi2_request_descriptor *)
  497                     (sc->request_desc_pool + i);
  498                 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
  499                     (io_req_base + offset);
  500                 cmd->io_request_phys_addr = io_req_base_phys + offset;
  501                 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
  502                     + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
  503                 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
  504                     * MEGASAS_MAX_SZ_CHAIN_FRAME;
  505 
  506                 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
  507         }
  508         return 0;
  509 }
  510 
  511 int
  512 mfi_tbolt_reset(struct mfi_softc *sc)
  513 {
  514         uint32_t fw_state;
  515 
  516         mtx_lock(&sc->mfi_io_lock);
  517         if (sc->hw_crit_error) {
  518                 device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
  519                 mtx_unlock(&sc->mfi_io_lock);
  520                 return 1;
  521         }
  522 
  523         if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
  524                 fw_state = sc->mfi_read_fw_status(sc);
  525                 if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
  526                         if ((sc->disableOnlineCtrlReset == 0)
  527                             && (sc->adpreset == 0)) {
  528                                 device_printf(sc->mfi_dev, "Adapter RESET "
  529                                     "condition is detected\n");
  530                                 sc->adpreset = 1;
  531                                 sc->issuepend_done = 0;
  532                                 sc->MFA_enabled = 0;
  533                                 sc->last_reply_idx = 0;
  534                                 mfi_process_fw_state_chg_isr((void *) sc);
  535                         }
  536                         mtx_unlock(&sc->mfi_io_lock);
  537                         return 0;
  538                 }
  539         }
  540         mtx_unlock(&sc->mfi_io_lock);
  541         return 1;
  542 }
  543 
  544 /*
  545  * mfi_intr_tbolt - isr entry point
  546  */
  547 void
  548 mfi_intr_tbolt(void *arg)
  549 {
  550         struct mfi_softc *sc = (struct mfi_softc *)arg;
  551 
  552         if (sc->mfi_check_clear_intr(sc) == 1) {
  553                 return;
  554         }
  555         if (sc->mfi_detaching)
  556                 return;
  557         mtx_lock(&sc->mfi_io_lock);
  558         mfi_tbolt_complete_cmd(sc);
  559         if (sc->mfi_flags & MFI_FLAGS_QFRZN)
  560                 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
  561         mfi_startio(sc);
  562         mtx_unlock(&sc->mfi_io_lock);
  563         return;
  564 }
  565 
  566 /*
   567  * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
  568  * @cmd :               Pointer to cmd
  569  * @status :            status of cmd returned by FW
  570  * @ext_status :        ext status of cmd returned by FW
  571  */
  572 
  573 void
  574 map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
  575     uint8_t ext_status)
  576 {
  577 
  578         switch (status) {
  579                 case MFI_STAT_OK:
  580                         mfi_cmd->cm_frame->header.cmd_status = 0;
  581                         mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
  582                         break;
  583 
  584                 case MFI_STAT_SCSI_IO_FAILED:
  585                 case MFI_STAT_LD_INIT_IN_PROGRESS:
  586                         mfi_cmd->cm_frame->header.cmd_status = status;
  587                         mfi_cmd->cm_frame->header.scsi_status = ext_status;
  588                         mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
  589                         mfi_cmd->cm_frame->dcmd.header.scsi_status
  590                             = ext_status;
  591                         break;
  592 
  593                 case MFI_STAT_SCSI_DONE_WITH_ERROR:
  594                         mfi_cmd->cm_frame->header.cmd_status = ext_status;
  595                         mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
  596                         break;
  597 
  598                 case MFI_STAT_LD_OFFLINE:
  599                 case MFI_STAT_DEVICE_NOT_FOUND:
  600                         mfi_cmd->cm_frame->header.cmd_status = status;
  601                         mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
  602                         break;
  603 
  604                 default:
  605                         mfi_cmd->cm_frame->header.cmd_status = status;
  606                         mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
  607                         break;
  608                 }
  609 }
  610 
  611 /*
  612  * mfi_tbolt_return_cmd -       Return a cmd to free command pool
  613  * @instance:           Adapter soft state
  614  * @cmd:                Command packet to be returned to free command pool
  615  */
  616 static inline void
  617 mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
  618 {
  619         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  620 
  621         TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
  622 }
  623 
  624 void
  625 mfi_tbolt_complete_cmd(struct mfi_softc *sc)
  626 {
  627         struct mfi_mpi2_reply_header *desc, *reply_desc;
  628         struct mfi_command *cmd_mfi, *cmd_mfi_check;    /* For MFA Cmds */
  629         struct mfi_cmd_tbolt *cmd_tbolt;
  630         uint16_t smid;
  631         uint8_t reply_descript_type;
  632         struct mfi_mpi2_request_raid_scsi_io  *scsi_io_req;
  633         uint32_t status, extStatus;
  634         uint16_t num_completed;
  635         union desc_value val;
  636 
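               /*
                * Start at the reply descriptor the firmware will complete
                * into next (last_reply_idx) and walk the reply ring until an
                * unused descriptor is reached.
                */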
  637         desc = (struct mfi_mpi2_reply_header *)
  638                 ((uintptr_t)sc->reply_frame_pool_align
  639                 + sc->last_reply_idx * sc->reply_size);
  640         reply_desc = desc;
  641 
  642         if (!reply_desc)
  643                 device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
  644 
  645         reply_descript_type = reply_desc->ReplyFlags
  646              & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  647         if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  648                 return;
  649 
  650         num_completed = 0;
  651         val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
  652 
  653         /* Read Reply descriptor */
  654         while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
  655                 smid = reply_desc->SMID;
  656                 if (!smid || smid > sc->mfi_max_fw_cmds + 1) {
  657                         device_printf(sc->mfi_dev, "smid is %x. Cannot "
  658                             "proceed. Returning \n", smid);
  659                         return;
  660                 }
  661 
  662                 cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
  663                 cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
  664                 scsi_io_req = cmd_tbolt->io_request;
  665 
  666                 status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
  667                 extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
  668                 map_tbolt_cmd_status(cmd_mfi, status, extStatus);
  669 
  670                 /* remove command from busy queue if not polled */
  671                 TAILQ_FOREACH(cmd_mfi_check, &sc->mfi_busy, cm_link) {
  672                         if (cmd_mfi_check == cmd_mfi) {
  673                                 mfi_remove_busy(cmd_mfi);
  674                                 break;
  675                         }
  676                 }
  677                 cmd_mfi->cm_error = 0;
  678                 mfi_complete(sc, cmd_mfi);
  679                 mfi_tbolt_return_cmd(sc, cmd_tbolt);
  680 
  681                 sc->last_reply_idx++;
  682                 if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
  683                         MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
  684                         sc->last_reply_idx = 0;
  685                 }
   686                 /* Set the descriptor back to all 0xFF. */
  687                 ((union mfi_mpi2_reply_descriptor*)desc)->words =
  688                         ~((uint64_t)0x00);
  689 
  690                 num_completed++;
  691 
  692                 /* Get the next reply descriptor */
  693                 desc = (struct mfi_mpi2_reply_header *)
  694                     ((uintptr_t)sc->reply_frame_pool_align
  695                     + sc->last_reply_idx * sc->reply_size);
  696                 reply_desc = desc;
  697                 val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
  698                 reply_descript_type = reply_desc->ReplyFlags
  699                     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  700                 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  701                         break;
  702         }
  703 
  704         if (!num_completed)
  705                 return;
  706 
  707         /* update replyIndex to FW */
  708         if (sc->last_reply_idx)
  709                 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
  710 
  711         return;
  712 }
  713 
  714 /*
   715  * mfi_tbolt_get_cmd -  Get a command from the free pool
  716  * @instance:           Adapter soft state
  717  *
  718  * Returns a free command from the pool
  719  */
  720 
  721 struct mfi_cmd_tbolt *
  722 mfi_tbolt_get_cmd(struct mfi_softc *sc)
  723 {
  724         struct mfi_cmd_tbolt *cmd = NULL;
  725 
  726         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  727 
   728         cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
               /* The free list may be empty; callers already check for NULL. */
               if (cmd == NULL)
                       return (NULL);
   729         TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
  730         memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
  731         memset((uint8_t *)cmd->io_request, 0,
  732             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
  733         return cmd;
  734 }
  735 
  736 union mfi_mpi2_request_descriptor *
  737 mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
  738 {
  739         uint8_t *p;
  740 
  741         if (index >= sc->mfi_max_fw_cmds) {
  742                 device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
  743                     "for descriptor\n", index);
  744                 return NULL;
  745         }
  746         p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
  747             * index;
  748         memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
  749         return (union mfi_mpi2_request_descriptor *)p;
  750 }
  751 
  752 
  753 /* Used to build IOCTL cmd */
  754 uint8_t
  755 mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
  756 {
  757         MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
  758         struct mfi_mpi2_request_raid_scsi_io *io_req;
  759         struct mfi_cmd_tbolt *cmd;
  760 
  761         cmd = mfi_tbolt_get_cmd(sc);
  762         if (!cmd)
  763                 return EBUSY;
  764         mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
  765         cmd->sync_cmd_idx = mfi_cmd->cm_index;
  766         io_req = cmd->io_request;
  767         mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
  768 
  769         io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
  770         io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
  771             SGL) / 4;
  772         io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
  773 
  774         mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
  775 
   776         /*
   777          * In MFI pass thru, NextChainOffset will always be zero to
   778          * indicate the end of the chain.
   779          */
  780         mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
  781                 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
  782 
  783         /* setting the length to the maximum length */
  784         mpi25_ieee_chain->Length = 1024;
  785 
  786         return 0;
  787 }
  788 
  789 void
  790 mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
  791     struct mfi_cmd_tbolt *cmd)
  792 {
  793         uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
  794         struct mfi_mpi2_request_raid_scsi_io    *io_request;
  795         struct IO_REQUEST_INFO io_info;
  796 
  797         device_id = mfi_cmd->cm_frame->io.header.target_id;
  798         io_request = cmd->io_request;
  799         io_request->RaidContext.TargetID = device_id;
  800         io_request->RaidContext.Status = 0;
  801         io_request->RaidContext.exStatus =0;
  802 
  803         start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
  804         start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
  805 
  806         memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
  807         io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
  808         io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
  809         io_info.ldTgtId = device_id;
  810         if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
  811             MFI_FRAME_DIR_READ)
  812                 io_info.isRead = 1;
  813 
   814         io_request->RaidContext.timeoutValue
   815             = MFI_FUSION_FP_DEFAULT_TIMEOUT;
   816         io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
   817         io_request->DevHandle = device_id;
   818         cmd->request_desc->header.RequestFlags
   819             = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
   820             << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  821         if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
  822                 io_request->RaidContext.RegLockLength = 0x100;
  823         io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
  824             * MFI_SECTOR_LEN;
  825 }
  826 
  827 int
  828 mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
  829 {
  830         if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
  831             || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  832                 return 1;
  833         else
  834                 return 0;
  835 }
  836 
  837 int
  838 mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
  839     struct mfi_cmd_tbolt *cmd)
  840 {
  841         uint32_t device_id;
  842         uint32_t sge_count;
  843         uint8_t cdb[32], cdb_len;
  844 
  845         memset(cdb, 0, 32);
  846         struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;
  847 
  848         device_id = mfi_cmd->cm_frame->header.target_id;
  849 
   850         /* Have to build the CDB here for TB, as BSD doesn't have a SCSI layer */
  851         if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
  852                 return 1;
  853 
   854         /* Just the CDB length, rest of the flags are zero */
  855         io_request->IoFlags = cdb_len;
  856         memcpy(io_request->CDB.CDB32, cdb, 32);
  857 
  858         if (mfi_tbolt_is_ldio(mfi_cmd))
  859                 mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
  860         else
  861                 return 1;
  862 
  863         /*
  864          * Construct SGL
  865          */
  866         sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
  867             (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
  868         if (sge_count > sc->mfi_max_sge) {
  869                 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
  870                     "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
  871                 return 1;
  872         }
  873         io_request->RaidContext.numSGE = sge_count;
  874         io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
  875 
  876         if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  877                 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
  878         else
  879                 io_request->Control = MPI2_SCSIIO_CONTROL_READ;
  880 
  881         io_request->SGLOffset0 = offsetof(
  882             struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
  883 
  884         io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
  885         io_request->SenseBufferLength = MFI_SENSE_LEN;
  886         return 0;
  887 }
  888 
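       /*
        * Build a SCSI READ/WRITE CDB for an LD I/O, picking the smallest of
        * the 6/10/12/16-byte forms that can encode the LBA and block count.
        */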
  889 static int
  890 mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
  891     uint8_t *cdb)
  892 {
  893         uint32_t lba_lo, lba_hi, num_lba;
  894         uint8_t cdb_len;
  895 
  896         if (mfi_cmd == NULL || cdb == NULL)
  897                 return 1;
  898         num_lba = mfi_cmd->cm_frame->io.header.data_len;
  899         lba_lo = mfi_cmd->cm_frame->io.lba_lo;
  900         lba_hi = mfi_cmd->cm_frame->io.lba_hi;
  901 
  902         if (lba_hi == 0 && (num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF)) {
  903                 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  904                         /* Read 6 or Write 6 */
  905                         cdb[0] = (uint8_t) (0x0A);
  906                 else
  907                         cdb[0] = (uint8_t) (0x08);
  908 
  909                 cdb[4] = (uint8_t) num_lba;
  910                 cdb[3] = (uint8_t) (lba_lo & 0xFF);
  911                 cdb[2] = (uint8_t) (lba_lo >> 8);
  912                 cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
  913                 cdb_len = 6;
  914         }
  915         else if (lba_hi == 0 && (num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF)) {
  916                 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  917                         /* Read 10 or Write 10 */
  918                         cdb[0] = (uint8_t) (0x2A);
  919                 else
  920                         cdb[0] = (uint8_t) (0x28);
  921                 cdb[8] = (uint8_t) (num_lba & 0xFF);
  922                 cdb[7] = (uint8_t) (num_lba >> 8);
  923                 cdb[5] = (uint8_t) (lba_lo & 0xFF);
  924                 cdb[4] = (uint8_t) (lba_lo >> 8);
  925                 cdb[3] = (uint8_t) (lba_lo >> 16);
  926                 cdb[2] = (uint8_t) (lba_lo >> 24);
  927                 cdb_len = 10;
  928         } else if ((num_lba > 0xFFFF) && (lba_hi == 0)) {
  929                 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  930                         /* Read 12 or Write 12 */
  931                         cdb[0] = (uint8_t) (0xAA);
  932                 else
  933                         cdb[0] = (uint8_t) (0xA8);
  934                 cdb[9] = (uint8_t) (num_lba & 0xFF);
  935                 cdb[8] = (uint8_t) (num_lba >> 8);
  936                 cdb[7] = (uint8_t) (num_lba >> 16);
  937                 cdb[6] = (uint8_t) (num_lba >> 24);
  938                 cdb[5] = (uint8_t) (lba_lo & 0xFF);
  939                 cdb[4] = (uint8_t) (lba_lo >> 8);
  940                 cdb[3] = (uint8_t) (lba_lo >> 16);
  941                 cdb[2] = (uint8_t) (lba_lo >> 24);
  942                 cdb_len = 12;
  943         } else {
  944                 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
  945                         cdb[0] = (uint8_t) (0x8A);
  946                 else
  947                         cdb[0] = (uint8_t) (0x88);
  948                 cdb[13] = (uint8_t) (num_lba & 0xFF);
  949                 cdb[12] = (uint8_t) (num_lba >> 8);
  950                 cdb[11] = (uint8_t) (num_lba >> 16);
  951                 cdb[10] = (uint8_t) (num_lba >> 24);
  952                 cdb[9] = (uint8_t) (lba_lo & 0xFF);
  953                 cdb[8] = (uint8_t) (lba_lo >> 8);
  954                 cdb[7] = (uint8_t) (lba_lo >> 16);
  955                 cdb[6] = (uint8_t) (lba_lo >> 24);
  956                 cdb[5] = (uint8_t) (lba_hi & 0xFF);
  957                 cdb[4] = (uint8_t) (lba_hi >> 8);
  958                 cdb[3] = (uint8_t) (lba_hi >> 16);
  959                 cdb[2] = (uint8_t) (lba_hi >> 24);
  960                 cdb_len = 16;
  961         }
  962         return cdb_len;
  963 }
  964 
  965 static int
  966 mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
  967                    pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
  968 {
  969         uint8_t i, sg_processed, sg_to_process;
  970         uint8_t sge_count, sge_idx;
  971         union mfi_sgl *os_sgl;
  972 
  973         /*
  974          * Return 0 if there is no data transfer
  975          */
  976         if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
  977                 device_printf(sc->mfi_dev, "Buffer empty \n");
  978                 return 0;
  979         }
  980         os_sgl = mfi_cmd->cm_sg;
  981         sge_count = mfi_cmd->cm_frame->header.sg_count;
  982 
  983         if (sge_count > sc->mfi_max_sge) {
  984                 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
  985                     os_sgl, sge_count);
  986                 return sge_count;
  987         }
  988 
  989         if (sge_count > sc->max_SGEs_in_main_message)
  990                 /* One element to store the chain info */
  991                 sge_idx = sc->max_SGEs_in_main_message - 1;
  992         else
  993                 sge_idx = sge_count;
  994 
  995         for (i = 0; i < sge_idx; i++) {
  996                 /*
   997                  * On 32-bit BSD the OS hands us 32-bit SGLs, but the FW
   998                  * only takes 64-bit SGLs, so copy the 32-bit entries
   999                  * into 64-bit ones.
 1000                  */
 1001                 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
 1002                         sgl_ptr->Length = os_sgl->sg_skinny[i].len;
 1003                         sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
 1004                 } else {
 1005                         sgl_ptr->Length = os_sgl->sg32[i].len;
 1006                         sgl_ptr->Address = os_sgl->sg32[i].addr;
 1007                 }
 1008                 sgl_ptr->Flags = 0;
 1009                 sgl_ptr++;
 1010                 cmd->io_request->ChainOffset = 0;
 1011         }
 1012 
 1013         sg_processed = i;
 1014 
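               /*
                * Any SGEs that did not fit in the main message go into this
                * command's chain frame, linked from the main message by an
                * IEEE chain element.
                */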
 1015         if (sg_processed < sge_count) {
 1016                 pMpi25IeeeSgeChain64_t sg_chain;
 1017                 sg_to_process = sge_count - sg_processed;
 1018                 cmd->io_request->ChainOffset =
 1019                     sc->chain_offset_value_for_main_message;
 1020                 sg_chain = sgl_ptr;
 1021                 /* Prepare chain element */
 1022                 sg_chain->NextChainOffset = 0;
 1023                 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 1024                     MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
 1025                 sg_chain->Length =  (sizeof(MPI2_SGE_IO_UNION) *
 1026                     (sge_count - sg_processed));
 1027                 sg_chain->Address = cmd->sg_frame_phys_addr;
 1028                 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
 1029                 for (; i < sge_count; i++) {
 1030                         if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
 1031                                 sgl_ptr->Length = os_sgl->sg_skinny[i].len;
 1032                                 sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
 1033                         } else {
 1034                                 sgl_ptr->Length = os_sgl->sg32[i].len;
 1035                                 sgl_ptr->Address = os_sgl->sg32[i].addr;
 1036                         }
 1037                         sgl_ptr->Flags = 0;
 1038                         sgl_ptr++;
 1039                 }
 1040         }
 1041         return sge_count;
 1042 }
 1043 
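       /*
        * Allocate a Thunderbolt command, build the MPT LD I/O request for the
        * given MFI command and return its request descriptor, or NULL on
        * failure.
        */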
 1044 union mfi_mpi2_request_descriptor *
 1045 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
 1046 {
 1047         struct mfi_cmd_tbolt *cmd;
 1048         union mfi_mpi2_request_descriptor *req_desc = NULL;
 1049         uint16_t index;
 1050         cmd = mfi_tbolt_get_cmd(sc);
 1051         if (!cmd)
 1052                 return NULL;
 1053         mfi_cmd->cm_extra_frames = cmd->index;
 1054         cmd->sync_cmd_idx = mfi_cmd->cm_index;
 1055 
 1056         index = cmd->index;
 1057         req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
 1058         if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
 1059                 return NULL;
 1060         req_desc->header.SMID = index;
 1061         return req_desc;
 1062 }
 1063 
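       /*
        * Wrap an MFI frame in an MPT pass-through request and return the
        * corresponding request descriptor; the SMID comes from
        * cm_extra_frames.
        */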
 1064 union mfi_mpi2_request_descriptor *
 1065 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
 1066 {
 1067         union mfi_mpi2_request_descriptor *req_desc = NULL;
 1068         uint16_t index;
 1069         if (mfi_build_mpt_pass_thru(sc, cmd)) {
 1070                 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
 1071                     "cmd\n");
 1072                 return NULL;
 1073         }
 1074         /* For fusion the frame_count variable is used for SMID */
 1075         index = cmd->cm_extra_frames;
 1076 
 1077         req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
 1078         if (!req_desc)
 1079                 return NULL;
 1080 
  1081         bzero(req_desc, sizeof(*req_desc));
 1082         req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 1083             MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 1084         req_desc->header.SMID = index;
 1085         return req_desc;
 1086 }
 1087 
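       /*
        * Issue an MFI command on a Thunderbolt controller: wrap it in an MPT
        * request (or build an LD I/O), post the request descriptor to the
        * inbound queue ports, and busy-wait here if the command is polled.
        */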
 1088 int
 1089 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
 1090 {
 1091         struct mfi_frame_header *hdr;
 1092         uint8_t *cdb;
 1093         union mfi_mpi2_request_descriptor *req_desc = NULL;
 1094         int tm = MFI_POLL_TIMEOUT_SECS * 1000;
 1095 
 1096         hdr = &cm->cm_frame->header;
 1097         cdb = cm->cm_frame->pass.cdb;
 1098         if (sc->adpreset)
 1099                 return 1;
 1100         if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
 1101                 cm->cm_timestamp = time_uptime;
 1102                 mfi_enqueue_busy(cm);
 1103         }
 1104         else {  /* still get interrupts for it */
 1105                 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
 1106                 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 1107         }
 1108 
 1109         if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
 1110                 /* check for inquiry commands coming from CLI */
 1111                 if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
 1112                         if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
 1113                             NULL) {
 1114                                 device_printf(sc->mfi_dev, "Mapping from MFI "
 1115                                     "to MPT Failed \n");
 1116                                 return 1;
 1117                         }
 1118                 }
 1119                 else
 1120                         device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
 1121         }
 1122         else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
 1123             hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
 1124                 if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
 1125                         device_printf(sc->mfi_dev, "LDIO Failed \n");
 1126                         return 1;
 1127                 }
 1128         } else
 1129                 if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
 1130                         device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
 1131                             "Failed\n");
 1132                         return 1;
 1133                 }
 1134         MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
 1135         MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
 1136 
 1137         if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
 1138                 return 0;
 1139 
 1140         /* This is a polled command, so busy-wait for it to complete. */
 1141         while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 1142                 DELAY(1000);
 1143                 tm -= 1;
 1144                 if (tm <= 0)
  1145                         break;
 1146         }
 1147 
 1148         if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 1149                 device_printf(sc->mfi_dev, "Frame %p timed out "
 1150                     "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
 1151                 return (ETIMEDOUT);
 1152         }
 1153         return 0;
 1154 }
 1155 
 1156 static void
 1157 mfi_issue_pending_cmds_again (struct mfi_softc *sc)
 1158 {
 1159         struct mfi_command *cm, *tmp;
 1160 
 1161         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1162         TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
 1163 
 1164                 cm->retry_for_fw_reset++;
 1165 
 1166                 /*
 1167                  * If a command has continuously been tried multiple times
 1168                  * and causing a FW reset condition, no further recoveries
 1169                  * should be performed on the controller
 1170                  */
 1171                 if (cm->retry_for_fw_reset == 3) {
 1172                         device_printf(sc->mfi_dev, "megaraid_sas: command %d "
  1173                             "was tried multiple times during adapter reset. "
  1174                             "Shutting down the HBA\n", cm->cm_index);
 1175                         mfi_kill_hba(sc);
 1176                         sc->hw_crit_error = 1;
 1177                         return;
 1178                 }
 1179 
 1180                 if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
 1181                         struct mfi_cmd_tbolt *cmd;
 1182                         mfi_remove_busy(cm);
 1183                         cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
 1184                             1 ];
 1185                         mfi_tbolt_return_cmd(sc, cmd);
 1186                         if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
 1187                                 if (cm->cm_frame->dcmd.opcode !=
 1188                                     MFI_DCMD_CTRL_EVENT_WAIT) {
 1189                                         device_printf(sc->mfi_dev,
 1190                                             "APJ ****requeue command %d \n",
 1191                                             cm->cm_index);
 1192                                         mfi_requeue_ready(cm);
 1193                                 }
 1194                         }
 1195                         else
 1196                                 mfi_release_command(cm);
 1197                 }
 1198         }
 1199         mfi_startio(sc);
 1200 }
 1201 
 1202 static void
 1203 mfi_kill_hba (struct mfi_softc *sc)
 1204 {
 1205         if (sc->mfi_flags & MFI_FLAGS_TBOLT)
 1206                 MFI_WRITE4 (sc, 0x00,MFI_STOP_ADP);
 1207         else
 1208                 MFI_WRITE4 (sc, MFI_IDB,MFI_STOP_ADP);
 1209 }
 1210 
 1211 static void
 1212 mfi_process_fw_state_chg_isr(void *arg)
 1213 {
 1214         struct mfi_softc *sc= (struct mfi_softc *)arg;
 1215         struct mfi_cmd_tbolt *cmd;
 1216         int error, status;
 1217 
 1218         if (sc->adpreset == 1) {
 1219                 device_printf(sc->mfi_dev, "First stage of FW reset "
 1220                      "initiated...\n");
 1221 
 1222                 sc->mfi_adp_reset(sc);
 1223                 sc->mfi_enable_intr(sc);
 1224 
 1225                 device_printf(sc->mfi_dev, "First stage of reset complete, "
 1226                     "second stage initiated...\n");
 1227 
 1228                 sc->adpreset = 2;
 1229 
  1230                 /* Wait about 20 seconds before starting the second stage init */
 1231                 for (int wait = 0; wait < 20000; wait++)
 1232                         DELAY(1000);
 1233                 device_printf(sc->mfi_dev, "Second stage of FW reset "
 1234                      "initiated...\n");
 1235                 while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
 1236 
 1237                 sc->mfi_disable_intr(sc);
 1238 
 1239                 /* We expect the FW state to be READY */
 1240                 if (mfi_transition_firmware(sc)) {
 1241                         device_printf(sc->mfi_dev, "controller is not in "
 1242                             "ready state\n");
 1243                         mfi_kill_hba(sc);
 1244                         sc->hw_crit_error= 1;
 1245                         return ;
 1246                 }
 1247                 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
 1248                                 return;
 1249 
 1250                 mtx_lock(&sc->mfi_io_lock);
 1251 
 1252                 sc->mfi_enable_intr(sc);
 1253                 sc->adpreset = 0;
 1254                 free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
 1255                 mfi_remove_busy(sc->mfi_aen_cm);
 1256                 cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
 1257                     - 1];
 1258                 mfi_tbolt_return_cmd(sc, cmd);
 1259                 if (sc->mfi_aen_cm) {
 1260                         mfi_release_command(sc->mfi_aen_cm);
 1261                         sc->mfi_aen_cm = NULL;
 1262                 }
 1263                 if (sc->mfi_map_sync_cm) {
 1264                         mfi_release_command(sc->mfi_map_sync_cm);
 1265                         sc->mfi_map_sync_cm = NULL;
 1266                 }
 1267                 mfi_issue_pending_cmds_again(sc);
 1268 
 1269                 /*
 1270                  * Issue pending command can result in adapter being marked
 1271                  * dead because of too many re-tries. Check for that
 1272                  * condition before clearing the reset condition on the FW
 1273                  */
 1274                 if (!sc->hw_crit_error) {
 1275                         /*
 1276                          * Initiate AEN (Asynchronous Event Notification)
 1277                          */
 1278                         mfi_aen_setup(sc, sc->last_seq_num);
 1279                         sc->issuepend_done = 1;
 1280                         device_printf(sc->mfi_dev, "second stage of reset "
 1281                             "complete, FW is ready now.\n");
 1282                 } else {
 1283                         device_printf(sc->mfi_dev, "second stage of reset "
 1284                              "never completed, hba was marked offline.\n");
 1285                 }
 1286         } else {
 1287                 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
 1288                     "called with unhandled value:%d\n", sc->adpreset);
 1289         }
 1290         mtx_unlock(&sc->mfi_io_lock);
 1291 }
 1292 
 1293 /*
 1294  * The ThunderBolt HW has an option for the driver to directly
 1295  * access the underlying disks and operate on the RAID.  To
 1296  * do this there needs to be a capability to keep the RAID controller
 1297  * and driver in sync.  The FreeBSD driver does not take advantage
 1298  * of this feature since it adds a lot of complexity and slows down
 1299  * performance.  Performance is gained by using the controller's
 1300  * cache etc.
 1301  *
 1302  * Even though this driver doesn't access the disks directly, an
 1303  * AEN like command is used to inform the RAID firmware to "sync"
 1304  * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 1305  * command in write mode will return when the RAID firmware has
 1306  * detected a change to the RAID state.  Examples of this type
 1307  * of change are removing a disk.  Once the command returns then
 1308  * the driver needs to acknowledge this and "sync" all LD's again.
 1309  * This repeats until we shutdown.  Then we need to cancel this
  1310  * This repeats until we shut down.  Then we need to cancel this
 1311  *
 1312  * If this is not done right the RAID firmware will not remove a
  1313  * pulled drive and the RAID won't go degraded, etc.  Effectively, this
  1314  * stops any RAID management functions.
 1315  *
  1316  * Doing another LD sync requires the use of an event, since the
 1317  * driver needs to do a mfi_wait_command and can't do that in an
 1318  * interrupt thread.
 1319  *
  1320  * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO, but
  1321  * that requires a bunch of structures, and it is simpler to just do
  1322  * MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 1323  */
 1324 
 1325 void
 1326 mfi_tbolt_sync_map_info(struct mfi_softc *sc)
 1327 {
 1328         int error = 0, i;
 1329         struct mfi_command *cmd;
 1330         struct mfi_dcmd_frame *dcmd;
 1331         uint32_t context = 0;
 1332         union mfi_ld_ref *ld_sync;
 1333         size_t ld_size;
 1334         struct mfi_frame_header *hdr;
 1335         struct mfi_command *cm = NULL;
 1336         struct mfi_ld_list *list = NULL;
 1337 
 1338         if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
 1339                 return;
 1340 
 1341         mtx_lock(&sc->mfi_io_lock);
 1342         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
 1343             (void **)&list, sizeof(*list));
 1344         if (error)
 1345                 goto out;
 1346 
 1347         cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
 1348         if (mfi_wait_command(sc, cm) != 0) {
 1349                 device_printf(sc->mfi_dev, "Failed to get device listing\n");
 1350                 goto out;
 1351         }
 1352 
 1353         hdr = &cm->cm_frame->header;
 1354         if (hdr->cmd_status != MFI_STAT_OK) {
 1355                 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
 1356                               hdr->cmd_status);
 1357                 goto out;
 1358         }
 1359 
 1360         ld_size = sizeof(*ld_sync) * list->ld_count;
 1361         mtx_unlock(&sc->mfi_io_lock);
 1362         ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
 1363              M_WAITOK | M_ZERO);
 1364         if (ld_sync == NULL) {
 1365                 device_printf(sc->mfi_dev, "Failed to allocate sync\n");
 1366                 goto out;
 1367         }
 1368         for (i = 0; i < list->ld_count; i++) {
 1369                 ld_sync[i].ref = list->ld_list[i].ld.ref;
 1370         }
 1371 
 1372         mtx_lock(&sc->mfi_io_lock);
 1373         if ((cmd = mfi_dequeue_free(sc)) == NULL) {
 1374                 device_printf(sc->mfi_dev, "Failed to get command\n");
 1375                 free(ld_sync, M_MFIBUF);
 1376                 goto out;
 1377         }
 1378         
 1379         context = cmd->cm_frame->header.context;
 1380         bzero(cmd->cm_frame, sizeof(union mfi_frame));
 1381         cmd->cm_frame->header.context = context;
 1382 
 1383         dcmd = &cmd->cm_frame->dcmd;
 1384         bzero(dcmd->mbox, MFI_MBOX_SIZE);
 1385         dcmd->header.cmd = MFI_CMD_DCMD;
 1386         dcmd->header.flags = MFI_FRAME_DIR_WRITE;
 1387         dcmd->header.timeout = 0;
 1388         dcmd->header.data_len = ld_size;
 1389         dcmd->header.scsi_status = 0;
 1390         dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
 1391         cmd->cm_sg = &dcmd->sgl;
 1392         cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 1393         cmd->cm_data = ld_sync;
 1394         cmd->cm_private = ld_sync;
 1395 
 1396         cmd->cm_len = ld_size;
 1397         cmd->cm_complete = mfi_sync_map_complete;
 1398         sc->mfi_map_sync_cm = cmd;
 1399 
 1400         cmd->cm_flags = MFI_CMD_DATAOUT;
 1401         cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
 1402         cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
 1403 
 1404         if ((error = mfi_mapcmd(sc, cmd)) != 0) {
 1405                 device_printf(sc->mfi_dev, "failed to send map sync\n");
 1406                 free(ld_sync, M_MFIBUF);
 1407                 sc->mfi_map_sync_cm = NULL;
 1408                 mfi_requeue_ready(cmd);
 1409                 goto out;
 1410         }
 1411 
 1412 out:
 1413         if (list)
 1414                 free(list, M_MFIBUF);
 1415         if (cm)
 1416                 mfi_release_command(cm);
 1417         mtx_unlock(&sc->mfi_io_lock);
 1418 }
 1419 
 1420 static void
 1421 mfi_sync_map_complete(struct mfi_command *cm)
 1422 {
 1423         struct mfi_frame_header *hdr;
 1424         struct mfi_softc *sc;
 1425         int aborted = 0;
 1426 
 1427         sc = cm->cm_sc;
 1428         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1429 
 1430         hdr = &cm->cm_frame->header;
 1431 
 1432         if (sc->mfi_map_sync_cm == NULL)
 1433                 return;
 1434 
 1435         if (sc->cm_map_abort ||
 1436             hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 1437                 sc->cm_map_abort = 0;
 1438                 aborted = 1;
 1439         }
 1440 
 1441         free(cm->cm_data, M_MFIBUF);
 1442         sc->mfi_map_sync_cm = NULL;
 1443         wakeup(&sc->mfi_map_sync_cm);
 1444         mfi_release_command(cm);
 1445 
 1446         /* set it up again so the driver can catch more events */
 1447         if (!aborted) {
 1448                 mfi_queue_map_sync(sc);
 1449         }
 1450 }
 1451 
 1452 static void
 1453 mfi_queue_map_sync(struct mfi_softc *sc)
 1454 {
 1455         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1456         taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
 1457 }
 1458 
 1459 void
 1460 mfi_handle_map_sync(void *context, int pending)
 1461 {
 1462         struct mfi_softc *sc;
 1463 
 1464         sc = context;
 1465         mfi_tbolt_sync_map_info(sc);
 1466 }
