FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi.c


    1 /*-
    2  * Copyright (c) 2006 IronPort Systems
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 /*-
   27  * Copyright (c) 2007 LSI Corp.
   28  * Copyright (c) 2007 Rajesh Prabhakaran.
   29  * All rights reserved.
   30  *
   31  * Redistribution and use in source and binary forms, with or without
   32  * modification, are permitted provided that the following conditions
   33  * are met:
   34  * 1. Redistributions of source code must retain the above copyright
   35  *    notice, this list of conditions and the following disclaimer.
   36  * 2. Redistributions in binary form must reproduce the above copyright
   37  *    notice, this list of conditions and the following disclaimer in the
   38  *    documentation and/or other materials provided with the distribution.
   39  *
   40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   50  * SUCH DAMAGE.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD: releng/9.1/sys/dev/mfi/mfi.c 254631 2013-08-22 00:51:48Z delphij $");
   55 
   56 #include "opt_compat.h"
   57 #include "opt_mfi.h"
   58 
   59 #include <sys/param.h>
   60 #include <sys/systm.h>
   61 #include <sys/sysctl.h>
   62 #include <sys/malloc.h>
   63 #include <sys/kernel.h>
   64 #include <sys/poll.h>
   65 #include <sys/selinfo.h>
   66 #include <sys/bus.h>
   67 #include <sys/conf.h>
   68 #include <sys/eventhandler.h>
   69 #include <sys/rman.h>
   70 #include <sys/bus_dma.h>
   71 #include <sys/bio.h>
   72 #include <sys/ioccom.h>
   73 #include <sys/uio.h>
   74 #include <sys/proc.h>
   75 #include <sys/signalvar.h>
   76 #include <sys/taskqueue.h>
   77 
   78 #include <machine/bus.h>
   79 #include <machine/resource.h>
   80 
   81 #include <dev/mfi/mfireg.h>
   82 #include <dev/mfi/mfi_ioctl.h>
   83 #include <dev/mfi/mfivar.h>
   84 #include <sys/interrupt.h>
   85 #include <sys/priority.h>
   86 
   87 static int      mfi_alloc_commands(struct mfi_softc *);
   88 static int      mfi_comms_init(struct mfi_softc *);
   89 static int      mfi_get_controller_info(struct mfi_softc *);
   90 static int      mfi_get_log_state(struct mfi_softc *,
   91                     struct mfi_evt_log_state **);
   92 static int      mfi_parse_entries(struct mfi_softc *, int, int);
   93 static void     mfi_data_cb(void *, bus_dma_segment_t *, int, int);
   94 static void     mfi_startup(void *arg);
   95 static void     mfi_intr(void *arg);
   96 static void     mfi_ldprobe(struct mfi_softc *sc);
   97 static void     mfi_syspdprobe(struct mfi_softc *sc);
   98 static void     mfi_handle_evt(void *context, int pending);
   99 static int      mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
  100 static void     mfi_aen_complete(struct mfi_command *);
  101 static int      mfi_add_ld(struct mfi_softc *sc, int);
  102 static void     mfi_add_ld_complete(struct mfi_command *);
  103 static int      mfi_add_sys_pd(struct mfi_softc *sc, int);
  104 static void     mfi_add_sys_pd_complete(struct mfi_command *);
  105 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
  106 static void     mfi_bio_complete(struct mfi_command *);
  107 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
  108 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
  109 static int      mfi_send_frame(struct mfi_softc *, struct mfi_command *);
  110 static int      mfi_abort(struct mfi_softc *, struct mfi_command **);
  111 static int      mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
  112 static void     mfi_timeout(void *);
  113 static int      mfi_user_command(struct mfi_softc *,
  114                     struct mfi_ioc_passthru *);
  115 static void     mfi_enable_intr_xscale(struct mfi_softc *sc);
  116 static void     mfi_enable_intr_ppc(struct mfi_softc *sc);
  117 static int32_t  mfi_read_fw_status_xscale(struct mfi_softc *sc);
  118 static int32_t  mfi_read_fw_status_ppc(struct mfi_softc *sc);
  119 static int      mfi_check_clear_intr_xscale(struct mfi_softc *sc);
  120 static int      mfi_check_clear_intr_ppc(struct mfi_softc *sc);
  121 static void     mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
  122                     uint32_t frame_cnt);
  123 static void     mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
  124                     uint32_t frame_cnt);
  125 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
  126 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
  127 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
  128 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
  129 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
  130 
  131 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
  132 static int      mfi_event_locale = MFI_EVT_LOCALE_ALL;
  133 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
  134 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
  135             0, "event message locale");
  136 
  137 static int      mfi_event_class = MFI_EVT_CLASS_INFO;
  138 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
  139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
  140           0, "event message class");
  141 
  142 static int      mfi_max_cmds = 128;
  143 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
  144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
  145            0, "Max commands");
  146 
  147 static int      mfi_detect_jbod_change = 1;
  148 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
  149 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
  150            &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
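       /*
        * Each of these is also a loader(8) tunable and can be set at boot
        * from loader.conf(5), e.g. hw.mfi.max_cmds="64".
        */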
  151 
  152 /* Management interface */
  153 static d_open_t         mfi_open;
  154 static d_close_t        mfi_close;
  155 static d_ioctl_t        mfi_ioctl;
  156 static d_poll_t         mfi_poll;
  157 
  158 static struct cdevsw mfi_cdevsw = {
  159         .d_version =    D_VERSION,
  160         .d_flags =      0,
  161         .d_open =       mfi_open,
  162         .d_close =      mfi_close,
  163         .d_ioctl =      mfi_ioctl,
  164         .d_poll =       mfi_poll,
  165         .d_name =       "mfi",
  166 };
  167 
  168 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
  169 
  170 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
  171 struct mfi_skinny_dma_info mfi_skinny;
  172 
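       /*
        * Low-level register access methods.  The xscale variants drive the
        * original 1064R controllers and the ppc variants drive the
        * 1078/GEN2/SKINNY families; mfi_attach() installs one set of
        * function pointers based on sc->mfi_flags.
        */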
  173 static void
  174 mfi_enable_intr_xscale(struct mfi_softc *sc)
  175 {
  176         MFI_WRITE4(sc, MFI_OMSK, 0x01);
  177 }
  178 
  179 static void
  180 mfi_enable_intr_ppc(struct mfi_softc *sc)
  181 {
  182         if (sc->mfi_flags & MFI_FLAGS_1078) {
  183                 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
  184                 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
  185         }
  186         else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
  187                 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
  188                 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
  189         }
  190         else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
  191                 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
  192         }
  193 }
  194 
  195 static int32_t
  196 mfi_read_fw_status_xscale(struct mfi_softc *sc)
  197 {
  198         return MFI_READ4(sc, MFI_OMSG0);
  199 }
  200 
  201 static int32_t
  202 mfi_read_fw_status_ppc(struct mfi_softc *sc)
  203 {
  204         return MFI_READ4(sc, MFI_OSP0);
  205 }
  206 
  207 static int
  208 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
  209 {
  210         int32_t status;
  211 
  212         status = MFI_READ4(sc, MFI_OSTS);
  213         if ((status & MFI_OSTS_INTR_VALID) == 0)
  214                 return 1;
  215 
  216         MFI_WRITE4(sc, MFI_OSTS, status);
  217         return 0;
  218 }
  219 
  220 static int
  221 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
  222 {
  223         int32_t status;
  224 
  225         status = MFI_READ4(sc, MFI_OSTS);
  226         if (sc->mfi_flags & MFI_FLAGS_1078) {
  227                 if (!(status & MFI_1078_RM)) {
  228                         return 1;
  229                 }
  230         }
  231         else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
  232                 if (!(status & MFI_GEN2_RM)) {
  233                         return 1;
  234                 }
  235         }
  236         else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
  237                 if (!(status & MFI_SKINNY_RM)) {
  238                         return 1;
  239                 }
  240         }
  241         if (sc->mfi_flags & MFI_FLAGS_SKINNY)
  242                 MFI_WRITE4(sc, MFI_OSTS, status);
  243         else
  244                 MFI_WRITE4(sc, MFI_ODCR0, status);
  245         return 0;
  246 }
  247 
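       /*
        * Hand a command frame to the firmware by writing its bus address to
        * the inbound queue port.  The low bits encode the count of extra
        * frames: xscale uses (addr >> 3) | frame_cnt, while the ppc/skinny
        * ports use (addr | frame_cnt << 1) | 1.
        */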
  248 static void
  249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
  250 {
  251         MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
  252 }
  253 
  254 static void
  255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
  256 {
  257         if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
  258             MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
  259             MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
  260         } else {
  261             MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
  262         }
  263 }
  264 
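       /*
        * Step the firmware through its handshake states (fault, wait
        * handshake, boot message pending, device scan, ...) by polling the
        * status register and writing the inbound doorbell, waiting up to
        * max_wait seconds in each state until the firmware reports READY.
        */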
  265 int
  266 mfi_transition_firmware(struct mfi_softc *sc)
  267 {
  268         uint32_t fw_state, cur_state;
  269         int max_wait, i;
  270         uint32_t cur_abs_reg_val = 0;
  271         uint32_t prev_abs_reg_val = 0;
  272 
  273         cur_abs_reg_val = sc->mfi_read_fw_status(sc);
  274         fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
  275         while (fw_state != MFI_FWSTATE_READY) {
  276                 if (bootverbose)
  277                         device_printf(sc->mfi_dev, "Waiting for firmware to "
  278                         "become ready\n");
  279                 cur_state = fw_state;
  280                 switch (fw_state) {
  281                 case MFI_FWSTATE_FAULT:
  282                         device_printf(sc->mfi_dev, "Firmware fault\n");
  283                         return (ENXIO);
  284                 case MFI_FWSTATE_WAIT_HANDSHAKE:
  285                         if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
  286                             MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
  287                         else
  288                             MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
  289                         max_wait = MFI_RESET_WAIT_TIME;
  290                         break;
  291                 case MFI_FWSTATE_OPERATIONAL:
  292                         if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
  293                             MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
  294                         else
  295                             MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
  296                         max_wait = MFI_RESET_WAIT_TIME;
  297                         break;
  298                 case MFI_FWSTATE_UNDEFINED:
  299                 case MFI_FWSTATE_BB_INIT:
  300                         max_wait = MFI_RESET_WAIT_TIME;
  301                         break;
  302                 case MFI_FWSTATE_FW_INIT_2:
  303                         max_wait = MFI_RESET_WAIT_TIME;
  304                         break;
  305                 case MFI_FWSTATE_FW_INIT:
  306                 case MFI_FWSTATE_FLUSH_CACHE:
  307                         max_wait = MFI_RESET_WAIT_TIME;
  308                         break;
  309                 case MFI_FWSTATE_DEVICE_SCAN:
  310                         max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
  311                         prev_abs_reg_val = cur_abs_reg_val;
  312                         break;
  313                 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
  314                         if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
  315                             MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
  316                         else
  317                             MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
  318                         max_wait = MFI_RESET_WAIT_TIME;
  319                         break;
  320                 default:
  321                         device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
  322                             fw_state);
  323                         return (ENXIO);
  324                 }
  325                 for (i = 0; i < (max_wait * 10); i++) {
  326                         cur_abs_reg_val = sc->mfi_read_fw_status(sc);
  327                         fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
  328                         if (fw_state == cur_state)
  329                                 DELAY(100000);
  330                         else
  331                                 break;
  332                 }
  333                 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
  334                         /* Check the device scanning progress */
  335                         if (prev_abs_reg_val != cur_abs_reg_val) {
  336                                 continue;
  337                         }
  338                 }
  339                 if (fw_state == cur_state) {
  340                         device_printf(sc->mfi_dev, "Firmware stuck in state "
  341                             "%#x\n", fw_state);
  342                         return (ENXIO);
  343                 }
  344         }
  345         return (0);
  346 }
  347 
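       /*
        * bus_dmamap_load() callback used for the driver's single-segment
        * control structures; it just records the segment's bus address.
        */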
  348 static void
  349 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  350 {
  351         bus_addr_t *addr;
  352 
  353         addr = arg;
  354         *addr = segs[0].ds_addr;
  355 }
  356 
  357 
  358 int
  359 mfi_attach(struct mfi_softc *sc)
  360 {
  361         uint32_t status;
  362         int error, commsz, framessz, sensesz;
  363         int frames, unit, max_fw_sge;
  364         uint32_t tb_mem_size = 0;
  365 
  366         if (sc == NULL)
  367                 return EINVAL;
  368 
  369         device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
  370             MEGASAS_VERSION);
  371 
  372         mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
  373         sx_init(&sc->mfi_config_lock, "MFI config");
  374         TAILQ_INIT(&sc->mfi_ld_tqh);
  375         TAILQ_INIT(&sc->mfi_syspd_tqh);
  376         TAILQ_INIT(&sc->mfi_ld_pend_tqh);
  377         TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
  378         TAILQ_INIT(&sc->mfi_evt_queue);
  379         TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
  380         TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
  381         TAILQ_INIT(&sc->mfi_aen_pids);
  382         TAILQ_INIT(&sc->mfi_cam_ccbq);
  383 
  384         mfi_initq_free(sc);
  385         mfi_initq_ready(sc);
  386         mfi_initq_busy(sc);
  387         mfi_initq_bio(sc);
  388 
  389         sc->adpreset = 0;
  390         sc->last_seq_num = 0;
  391         sc->disableOnlineCtrlReset = 1;
  392         sc->issuepend_done = 1;
  393         sc->hw_crit_error = 0;
  394 
  395         if (sc->mfi_flags & MFI_FLAGS_1064R) {
  396                 sc->mfi_enable_intr = mfi_enable_intr_xscale;
  397                 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
  398                 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
  399                 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
  400         } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
  401                 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
  402                 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
  403                 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
  404                 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
  405                 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
  406                 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
  407                 sc->mfi_tbolt = 1;
  408                 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
  409         } else {
  410                 sc->mfi_enable_intr =  mfi_enable_intr_ppc;
  411                 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
  412                 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
  413                 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
  414         }
  415 
  416 
  417         /* Before we get too far, see if the firmware is working */
  418         if ((error = mfi_transition_firmware(sc)) != 0) {
  419                 device_printf(sc->mfi_dev, "Firmware not in READY state, "
  420                     "error %d\n", error);
  421                 return (ENXIO);
  422         }
  423 
  424         /* Start: LSIP200113393 */
  425         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  426                                 1, 0,                   /* algnmnt, boundary */
  427                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  428                                 BUS_SPACE_MAXADDR,      /* highaddr */
  429                                 NULL, NULL,             /* filter, filterarg */
  430                                 MEGASAS_MAX_NAME*sizeof(bus_addr_t),                    /* maxsize */
   431                                 1,                      /* nsegments */
  432                                 MEGASAS_MAX_NAME*sizeof(bus_addr_t),                    /* maxsegsize */
  433                                 0,                      /* flags */
  434                                 NULL, NULL,             /* lockfunc, lockarg */
  435                                 &sc->verbuf_h_dmat)) {
  436                 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
  437                 return (ENOMEM);
  438         }
  439         if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
  440             BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
  441                 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
  442                 return (ENOMEM);
  443         }
  444         bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
  445         bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
  446             sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
  447             mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
  448         /* End: LSIP200113393 */
  449 
  450         /*
  451          * Get information needed for sizing the contiguous memory for the
  452          * frame pool.  Size down the sgl parameter since we know that
  453          * we will never need more than what's required for MAXPHYS.
  454          * It would be nice if these constants were available at runtime
  455          * instead of compile time.
  456          */
  457         status = sc->mfi_read_fw_status(sc);
  458         sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
  459         max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
  460         sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
  461 
  462         /* ThunderBolt Support get the contiguous memory */
  463 
  464         if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
  465                 mfi_tbolt_init_globals(sc);
  466                 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
  467                     sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
  468                 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
  469 
  470                 if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  471                                 1, 0,                   /* algnmnt, boundary */
  472                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  473                                 BUS_SPACE_MAXADDR,      /* highaddr */
  474                                 NULL, NULL,             /* filter, filterarg */
  475                                 tb_mem_size,            /* maxsize */
   476                                 1,                      /* nsegments */
  477                                 tb_mem_size,            /* maxsegsize */
  478                                 0,                      /* flags */
  479                                 NULL, NULL,             /* lockfunc, lockarg */
  480                                 &sc->mfi_tb_dmat)) {
  481                         device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
  482                         return (ENOMEM);
  483                 }
  484                 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
  485                 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
  486                         device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
  487                         return (ENOMEM);
  488                 }
  489                 bzero(sc->request_message_pool, tb_mem_size);
  490                 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
  491                 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
  492 
  493                 /* For ThunderBolt memory init */
  494                 if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  495                                 0x100, 0,               /* alignmnt, boundary */
  496                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  497                                 BUS_SPACE_MAXADDR,      /* highaddr */
  498                                 NULL, NULL,             /* filter, filterarg */
  499                                 MFI_FRAME_SIZE,         /* maxsize */
   500                                 1,                      /* nsegments */
  501                                 MFI_FRAME_SIZE,         /* maxsegsize */
  502                                 0,                      /* flags */
  503                                 NULL, NULL,             /* lockfunc, lockarg */
  504                                 &sc->mfi_tb_init_dmat)) {
  505                 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
  506                 return (ENOMEM);
  507                 }
  508                 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
  509                     BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
  510                         device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
  511                         return (ENOMEM);
  512                 }
  513                 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
  514                 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
  515                 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
  516                     &sc->mfi_tb_init_busaddr, 0);
  517                 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
  518                     tb_mem_size)) {
  519                         device_printf(sc->mfi_dev,
  520                             "Thunderbolt pool preparation error\n");
  521                         return 0;
  522                 }
  523 
   524                 /*
   525                  * Allocate DMA memory for the MPI2 IOC Init descriptor.  It is
   526                  * kept separate from what was allocated for the request and
   527                  * reply descriptors to avoid confusion later.
   528                  */
  529                 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
  530                 if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  531                                 1, 0,                   /* algnmnt, boundary */
  532                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  533                                 BUS_SPACE_MAXADDR,      /* highaddr */
  534                                 NULL, NULL,             /* filter, filterarg */
  535                                 tb_mem_size,            /* maxsize */
   536                                 1,                      /* nsegments */
  537                                 tb_mem_size,            /* maxsegsize */
  538                                 0,                      /* flags */
  539                                 NULL, NULL,             /* lockfunc, lockarg */
  540                                 &sc->mfi_tb_ioc_init_dmat)) {
  541                         device_printf(sc->mfi_dev,
  542                             "Cannot allocate comms DMA tag\n");
  543                         return (ENOMEM);
  544                 }
  545                 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
  546                     (void **)&sc->mfi_tb_ioc_init_desc,
  547                     BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
  548                         device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
  549                         return (ENOMEM);
  550                 }
  551                 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
  552                 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
  553                 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
  554                     &sc->mfi_tb_ioc_init_busaddr, 0);
  555         }
  556         /*
  557          * Create the dma tag for data buffers.  Used both for block I/O
  558          * and for various internal data queries.
  559          */
  560         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  561                                 1, 0,                   /* algnmnt, boundary */
  562                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  563                                 BUS_SPACE_MAXADDR,      /* highaddr */
  564                                 NULL, NULL,             /* filter, filterarg */
  565                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  566                                 sc->mfi_max_sge,        /* nsegments */
  567                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  568                                 BUS_DMA_ALLOCNOW,       /* flags */
  569                                 busdma_lock_mutex,      /* lockfunc */
  570                                 &sc->mfi_io_lock,       /* lockfuncarg */
  571                                 &sc->mfi_buffer_dmat)) {
  572                 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
  573                 return (ENOMEM);
  574         }
  575 
  576         /*
  577          * Allocate DMA memory for the comms queues.  Keep it under 4GB for
  578          * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
   579          * entry, so the calculated size here will be 1 more than
  580          * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
  581          */
  582         commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
  583             sizeof(struct mfi_hwcomms);
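               /*
                * For example, a firmware that advertises 128 commands gets a
                * 129-entry reply queue (see mfi_comms_init()).
                */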
  584         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  585                                 1, 0,                   /* algnmnt, boundary */
  586                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  587                                 BUS_SPACE_MAXADDR,      /* highaddr */
  588                                 NULL, NULL,             /* filter, filterarg */
  589                                 commsz,                 /* maxsize */
   590                                 1,                      /* nsegments */
  591                                 commsz,                 /* maxsegsize */
  592                                 0,                      /* flags */
  593                                 NULL, NULL,             /* lockfunc, lockarg */
  594                                 &sc->mfi_comms_dmat)) {
  595                 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
  596                 return (ENOMEM);
  597         }
  598         if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
  599             BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
  600                 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
  601                 return (ENOMEM);
  602         }
  603         bzero(sc->mfi_comms, commsz);
  604         bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
  605             sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
  606         /*
  607          * Allocate DMA memory for the command frames.  Keep them in the
  608          * lower 4GB for efficiency.  Calculate the size of the commands at
  609          * the same time; each command is one 64 byte frame plus a set of
  610          * additional frames for holding sg lists or other data.
  611          * The assumption here is that the SG list will start at the second
  612          * frame and not use the unused bytes in the first frame.  While this
  613          * isn't technically correct, it simplifies the calculation and allows
  614          * for command frames that might be larger than an mfi_io_frame.
  615          */
  616         if (sizeof(bus_addr_t) == 8) {
  617                 sc->mfi_sge_size = sizeof(struct mfi_sg64);
  618                 sc->mfi_flags |= MFI_FLAGS_SG64;
  619         } else {
  620                 sc->mfi_sge_size = sizeof(struct mfi_sg32);
  621         }
  622         if (sc->mfi_flags & MFI_FLAGS_SKINNY)
  623                 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
  624         frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
  625         sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
  626         framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
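               /*
                * For example, assuming 64-byte frames and 12-byte 64-bit S/G
                * entries with mfi_max_sge = 33 (a 128k MAXPHYS and 4k pages),
                * each command gets 8 frames (512 bytes), so a 128-command
                * firmware needs a 64k frame pool.
                */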
  627         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  628                                 64, 0,                  /* algnmnt, boundary */
  629                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  630                                 BUS_SPACE_MAXADDR,      /* highaddr */
  631                                 NULL, NULL,             /* filter, filterarg */
  632                                 framessz,               /* maxsize */
  633                                 1,                      /* nsegments */
  634                                 framessz,               /* maxsegsize */
  635                                 0,                      /* flags */
  636                                 NULL, NULL,             /* lockfunc, lockarg */
  637                                 &sc->mfi_frames_dmat)) {
  638                 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
  639                 return (ENOMEM);
  640         }
  641         if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
  642             BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
  643                 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
  644                 return (ENOMEM);
  645         }
  646         bzero(sc->mfi_frames, framessz);
  647         bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
  648             sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
  649         /*
  650          * Allocate DMA memory for the frame sense data.  Keep them in the
  651          * lower 4GB for efficiency
  652          */
  653         sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
  654         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  655                                 4, 0,                   /* algnmnt, boundary */
  656                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  657                                 BUS_SPACE_MAXADDR,      /* highaddr */
  658                                 NULL, NULL,             /* filter, filterarg */
  659                                 sensesz,                /* maxsize */
  660                                 1,                      /* nsegments */
  661                                 sensesz,                /* maxsegsize */
  662                                 0,                      /* flags */
  663                                 NULL, NULL,             /* lockfunc, lockarg */
  664                                 &sc->mfi_sense_dmat)) {
  665                 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
  666                 return (ENOMEM);
  667         }
  668         if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
  669             BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
  670                 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
  671                 return (ENOMEM);
  672         }
  673         bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
  674             sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
  675         if ((error = mfi_alloc_commands(sc)) != 0)
  676                 return (error);
  677 
  678         /* Before moving the FW to operational state, check whether
   679          * host memory is required by the FW or not.
  680          */
  681 
  682         /* ThunderBolt MFI_IOC2 INIT */
  683         if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
  684                 sc->mfi_disable_intr(sc);
  685                 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
  686                         device_printf(sc->mfi_dev,
  687                             "TB Init has failed with error %d\n",error);
  688                         return error;
  689                 }
  690 
  691                 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
  692                         return error;
  693                 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
  694                     INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
  695                     &sc->mfi_intr)) {
  696                         device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
  697                         return (EINVAL);
  698                 }
  699                 sc->mfi_intr_ptr = mfi_intr_tbolt;
  700                 sc->mfi_enable_intr(sc);
  701         } else {
  702                 if ((error = mfi_comms_init(sc)) != 0)
  703                         return (error);
  704 
  705                 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
  706                     INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
  707                         device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
  708                         return (EINVAL);
  709                 }
  710                 sc->mfi_intr_ptr = mfi_intr;
  711                 sc->mfi_enable_intr(sc);
  712         }
  713         if ((error = mfi_get_controller_info(sc)) != 0)
  714                 return (error);
  715         sc->disableOnlineCtrlReset = 0;
  716 
  717         /* Register a config hook to probe the bus for arrays */
  718         sc->mfi_ich.ich_func = mfi_startup;
  719         sc->mfi_ich.ich_arg = sc;
  720         if (config_intrhook_establish(&sc->mfi_ich) != 0) {
  721                 device_printf(sc->mfi_dev, "Cannot establish configuration "
  722                     "hook\n");
  723                 return (EINVAL);
  724         }
   725         if ((error = mfi_aen_setup(sc, 0)) != 0)
   726                 return (error);
  729 
  730         /*
  731          * Register a shutdown handler.
  732          */
  733         if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
  734             sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
  735                 device_printf(sc->mfi_dev, "Warning: shutdown event "
  736                     "registration failed\n");
  737         }
  738 
  739         /*
  740          * Create the control device for doing management
  741          */
  742         unit = device_get_unit(sc->mfi_dev);
  743         sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
  744             0640, "mfi%d", unit);
  745         if (unit == 0)
  746                 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
  747         if (sc->mfi_cdev != NULL)
  748                 sc->mfi_cdev->si_drv1 = sc;
  749         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  750             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  751             OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
  752             &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
  753         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  754             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  755             OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
  756             &sc->mfi_keep_deleted_volumes, 0,
  757             "Don't detach the mfid device for a busy volume that is deleted");
  758 
  759         device_add_child(sc->mfi_dev, "mfip", -1);
  760         bus_generic_attach(sc->mfi_dev);
  761 
  762         /* Start the timeout watchdog */
  763         callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
  764         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
  765             mfi_timeout, sc);
  766 
  767         if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
  768                 mfi_tbolt_sync_map_info(sc);
  769         }
  770 
  771         return (0);
  772 }
  773 
  774 static int
  775 mfi_alloc_commands(struct mfi_softc *sc)
  776 {
  777         struct mfi_command *cm;
  778         int i, ncmds;
  779 
  780         /*
  781          * XXX Should we allocate all the commands up front, or allocate on
  782          * demand later like 'aac' does?
  783          */
  784         ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
  785         if (bootverbose)
  786                 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
  787                    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
  788 
  789         sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
  790             M_WAITOK | M_ZERO);
  791 
  792         for (i = 0; i < ncmds; i++) {
  793                 cm = &sc->mfi_commands[i];
  794                 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
  795                     sc->mfi_cmd_size * i);
  796                 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
  797                     sc->mfi_cmd_size * i;
  798                 cm->cm_frame->header.context = i;
  799                 cm->cm_sense = &sc->mfi_sense[i];
  800                 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
  801                 cm->cm_sc = sc;
  802                 cm->cm_index = i;
  803                 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
  804                     &cm->cm_dmamap) == 0) {
  805                         mtx_lock(&sc->mfi_io_lock);
  806                         mfi_release_command(cm);
  807                         mtx_unlock(&sc->mfi_io_lock);
  808                 }
  809                 else
  810                         break;
  811                 sc->mfi_total_cmds++;
  812         }
  813 
  814         return (0);
  815 }
  816 
  817 void
  818 mfi_release_command(struct mfi_command *cm)
  819 {
  820         struct mfi_frame_header *hdr;
  821         uint32_t *hdr_data;
  822 
  823         mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
  824 
  825         /*
  826          * Zero out the important fields of the frame, but make sure the
  827          * context field is preserved.  For efficiency, handle the fields
  828          * as 32 bit words.  Clear out the first S/G entry too for safety.
  829          */
  830         hdr = &cm->cm_frame->header;
  831         if (cm->cm_data != NULL && hdr->sg_count) {
  832                 cm->cm_sg->sg32[0].len = 0;
  833                 cm->cm_sg->sg32[0].addr = 0;
  834         }
  835 
  836         hdr_data = (uint32_t *)cm->cm_frame;
  837         hdr_data[0] = 0;        /* cmd, sense_len, cmd_status, scsi_status */
  838         hdr_data[1] = 0;        /* target_id, lun_id, cdb_len, sg_count */
  839         hdr_data[4] = 0;        /* flags, timeout */
  840         hdr_data[5] = 0;        /* data_len */
  841 
  842         cm->cm_extra_frames = 0;
  843         cm->cm_flags = 0;
  844         cm->cm_complete = NULL;
  845         cm->cm_private = NULL;
  846         cm->cm_data = NULL;
  847         cm->cm_sg = 0;
  848         cm->cm_total_frame_size = 0;
  849         cm->retry_for_fw_reset = 0;
  850 
  851         mfi_enqueue_free(cm);
  852 }
  853 
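       /*
        * Allocate a command and pre-fill its DCMD frame.  The caller must
        * hold mfi_io_lock; if *bufp is NULL a data buffer of bufsize bytes
        * is allocated and returned through bufp, otherwise the caller's
        * buffer is used.  mfi_get_controller_info() below is a typical
        * caller.
        */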
  854 int
  855 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
  856     uint32_t opcode, void **bufp, size_t bufsize)
  857 {
  858         struct mfi_command *cm;
  859         struct mfi_dcmd_frame *dcmd;
  860         void *buf = NULL;
  861         uint32_t context = 0;
  862 
  863         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  864 
  865         cm = mfi_dequeue_free(sc);
  866         if (cm == NULL)
  867                 return (EBUSY);
  868 
  869         /* Zero out the MFI frame */
  870         context = cm->cm_frame->header.context;
  871         bzero(cm->cm_frame, sizeof(union mfi_frame));
  872         cm->cm_frame->header.context = context;
  873 
  874         if ((bufsize > 0) && (bufp != NULL)) {
  875                 if (*bufp == NULL) {
  876                         buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
  877                         if (buf == NULL) {
  878                                 mfi_release_command(cm);
  879                                 return (ENOMEM);
  880                         }
  881                         *bufp = buf;
  882                 } else {
  883                         buf = *bufp;
  884                 }
  885         }
  886 
  887         dcmd =  &cm->cm_frame->dcmd;
  888         bzero(dcmd->mbox, MFI_MBOX_SIZE);
  889         dcmd->header.cmd = MFI_CMD_DCMD;
  890         dcmd->header.timeout = 0;
  891         dcmd->header.flags = 0;
  892         dcmd->header.data_len = bufsize;
  893         dcmd->header.scsi_status = 0;
  894         dcmd->opcode = opcode;
  895         cm->cm_sg = &dcmd->sgl;
  896         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
  897         cm->cm_flags = 0;
  898         cm->cm_data = buf;
  899         cm->cm_private = buf;
  900         cm->cm_len = bufsize;
  901 
  902         *cmp = cm;
  903         if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
  904                 *bufp = buf;
  905         return (0);
  906 }
  907 
  908 static int
  909 mfi_comms_init(struct mfi_softc *sc)
  910 {
  911         struct mfi_command *cm;
  912         struct mfi_init_frame *init;
  913         struct mfi_init_qinfo *qinfo;
  914         int error;
  915         uint32_t context = 0;
  916 
  917         mtx_lock(&sc->mfi_io_lock);
   918         if ((cm = mfi_dequeue_free(sc)) == NULL) {
   919                 mtx_unlock(&sc->mfi_io_lock);
                       return (EBUSY);
               }
  920 
  921         /* Zero out the MFI frame */
  922         context = cm->cm_frame->header.context;
  923         bzero(cm->cm_frame, sizeof(union mfi_frame));
  924         cm->cm_frame->header.context = context;
  925 
  926         /*
  927          * Abuse the SG list area of the frame to hold the init_qinfo
  928          * object;
  929          */
  930         init = &cm->cm_frame->init;
  931         qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
  932 
  933         bzero(qinfo, sizeof(struct mfi_init_qinfo));
  934         qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
  935         qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
  936             offsetof(struct mfi_hwcomms, hw_reply_q);
  937         qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
  938             offsetof(struct mfi_hwcomms, hw_pi);
  939         qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
  940             offsetof(struct mfi_hwcomms, hw_ci);
  941 
  942         init->header.cmd = MFI_CMD_INIT;
  943         init->header.data_len = sizeof(struct mfi_init_qinfo);
  944         init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
  945         cm->cm_data = NULL;
  946         cm->cm_flags = MFI_CMD_POLLED;
  947 
  948         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  949                 device_printf(sc->mfi_dev, "failed to send init command\n");
  950                 mtx_unlock(&sc->mfi_io_lock);
  951                 return (error);
  952         }
  953         mfi_release_command(cm);
  954         mtx_unlock(&sc->mfi_io_lock);
  955 
  956         return (0);
  957 }
  958 
  959 static int
  960 mfi_get_controller_info(struct mfi_softc *sc)
  961 {
  962         struct mfi_command *cm = NULL;
  963         struct mfi_ctrl_info *ci = NULL;
  964         uint32_t max_sectors_1, max_sectors_2;
  965         int error;
  966 
  967         mtx_lock(&sc->mfi_io_lock);
  968         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
  969             (void **)&ci, sizeof(*ci));
  970         if (error)
  971                 goto out;
  972         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
  973 
  974         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  975                 device_printf(sc->mfi_dev, "Failed to get controller info\n");
  976                 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
  977                     MFI_SECTOR_LEN;
  978                 error = 0;
  979                 goto out;
  980         }
  981 
  982         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
  983             BUS_DMASYNC_POSTREAD);
  984         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
  985 
  986         max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
  987         max_sectors_2 = ci->max_request_size;
  988         sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
  989         sc->disableOnlineCtrlReset =
  990             ci->properties.OnOffProperties.disableOnlineCtrlReset;
  991 
  992 out:
  993         if (ci)
  994                 free(ci, M_MFIBUF);
  995         if (cm)
  996                 mfi_release_command(cm);
  997         mtx_unlock(&sc->mfi_io_lock);
  998         return (error);
  999 }
 1000 
 1001 static int
 1002 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
 1003 {
 1004         struct mfi_command *cm = NULL;
 1005         int error;
 1006 
 1007         mtx_lock(&sc->mfi_io_lock);
 1008         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
 1009             (void **)log_state, sizeof(**log_state));
 1010         if (error)
 1011                 goto out;
 1012         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 1013 
 1014         if ((error = mfi_mapcmd(sc, cm)) != 0) {
 1015                 device_printf(sc->mfi_dev, "Failed to get log state\n");
 1016                 goto out;
 1017         }
 1018 
 1019         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1020             BUS_DMASYNC_POSTREAD);
 1021         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1022 
 1023 out:
 1024         if (cm)
 1025                 mfi_release_command(cm);
 1026         mtx_unlock(&sc->mfi_io_lock);
 1027 
 1028         return (error);
 1029 }
 1030 
 1031 int
 1032 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
 1033 {
 1034         struct mfi_evt_log_state *log_state = NULL;
 1035         union mfi_evt class_locale;
 1036         int error = 0;
 1037         uint32_t seq;
 1038 
 1039         class_locale.members.reserved = 0;
 1040         class_locale.members.locale = mfi_event_locale;
 1041         class_locale.members.evt_class  = mfi_event_class;
 1042 
 1043         if (seq_start == 0) {
  1044                 error = mfi_get_log_state(sc, &log_state);
  1045                 if (error) {
  1046                         if (log_state)
  1047                                 free(log_state, M_MFIBUF);
  1048                         return (error);
  1049                 }
  1050                 sc->mfi_boot_seq_num = log_state->boot_seq_num;
 1051 
 1052                 /*
 1053                  * Walk through any events that fired since the last
 1054                  * shutdown.
 1055                  */
 1056                 mfi_parse_entries(sc, log_state->shutdown_seq_num,
 1057                     log_state->newest_seq_num);
 1058                 seq = log_state->newest_seq_num;
 1059         } else
 1060                 seq = seq_start;
 1061         mfi_aen_register(sc, seq, class_locale.word);
 1062         free(log_state, M_MFIBUF);
 1063 
 1064         return 0;
 1065 }
 1066 
 1067 int
 1068 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
 1069 {
 1070 
 1071         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1072         cm->cm_complete = NULL;
 1073 
 1074 
 1075         /*
 1076          * MegaCli can issue a DCMD of 0.  In this case do nothing
 1077          * and return 0 to it as status
 1078          */
 1079         if (cm->cm_frame->dcmd.opcode == 0) {
 1080                 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
 1081                 cm->cm_error = 0;
 1082                 return (cm->cm_error);
 1083         }
 1084         mfi_enqueue_ready(cm);
 1085         mfi_startio(sc);
 1086         if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
 1087                 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
 1088         return (cm->cm_error);
 1089 }
 1090 
 1091 void
 1092 mfi_free(struct mfi_softc *sc)
 1093 {
 1094         struct mfi_command *cm;
 1095         int i;
 1096 
 1097         callout_drain(&sc->mfi_watchdog_callout);
 1098 
 1099         if (sc->mfi_cdev != NULL)
 1100                 destroy_dev(sc->mfi_cdev);
 1101 
 1102         if (sc->mfi_total_cmds != 0) {
 1103                 for (i = 0; i < sc->mfi_total_cmds; i++) {
 1104                         cm = &sc->mfi_commands[i];
 1105                         bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1106                 }
 1107                 free(sc->mfi_commands, M_MFIBUF);
 1108         }
 1109 
 1110         if (sc->mfi_intr)
 1111                 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
 1112         if (sc->mfi_irq != NULL)
 1113                 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
 1114                     sc->mfi_irq);
 1115 
 1116         if (sc->mfi_sense_busaddr != 0)
 1117                 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
 1118         if (sc->mfi_sense != NULL)
 1119                 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
 1120                     sc->mfi_sense_dmamap);
 1121         if (sc->mfi_sense_dmat != NULL)
 1122                 bus_dma_tag_destroy(sc->mfi_sense_dmat);
 1123 
 1124         if (sc->mfi_frames_busaddr != 0)
 1125                 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
 1126         if (sc->mfi_frames != NULL)
 1127                 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
 1128                     sc->mfi_frames_dmamap);
 1129         if (sc->mfi_frames_dmat != NULL)
 1130                 bus_dma_tag_destroy(sc->mfi_frames_dmat);
 1131 
 1132         if (sc->mfi_comms_busaddr != 0)
 1133                 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
 1134         if (sc->mfi_comms != NULL)
 1135                 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
 1136                     sc->mfi_comms_dmamap);
 1137         if (sc->mfi_comms_dmat != NULL)
 1138                 bus_dma_tag_destroy(sc->mfi_comms_dmat);
 1139 
 1140         /* ThunderBolt contiguous memory free here */
 1141         if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
 1142                 if (sc->mfi_tb_busaddr != 0)
 1143                         bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
 1144                 if (sc->request_message_pool != NULL)
 1145                         bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
 1146                             sc->mfi_tb_dmamap);
 1147                 if (sc->mfi_tb_dmat != NULL)
 1148                         bus_dma_tag_destroy(sc->mfi_tb_dmat);
 1149 
 1150                 /* Version buffer memory free */
 1151                 /* Start LSIP200113393 */
 1152                 if (sc->verbuf_h_busaddr != 0)
 1153                         bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
 1154                 if (sc->verbuf != NULL)
 1155                         bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
 1156                             sc->verbuf_h_dmamap);
 1157                 if (sc->verbuf_h_dmat != NULL)
 1158                         bus_dma_tag_destroy(sc->verbuf_h_dmat);
 1159 
 1160                 /* End LSIP200113393 */
 1161                 /* ThunderBolt INIT packet memory Free */
 1162                 if (sc->mfi_tb_init_busaddr != 0)
 1163                         bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
 1164                 if (sc->mfi_tb_init != NULL)
 1165                         bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
 1166                             sc->mfi_tb_init_dmamap);
 1167                 if (sc->mfi_tb_init_dmat != NULL)
 1168                         bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
 1169 
 1170                 /* ThunderBolt IOC Init Desc memory free here */
 1171                 if (sc->mfi_tb_ioc_init_busaddr != 0)
 1172                         bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
 1173                             sc->mfi_tb_ioc_init_dmamap);
 1174                 if (sc->mfi_tb_ioc_init_desc != NULL)
 1175                         bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
 1176                             sc->mfi_tb_ioc_init_desc,
 1177                             sc->mfi_tb_ioc_init_dmamap);
 1178                 if (sc->mfi_tb_ioc_init_dmat != NULL)
 1179                         bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
 1180                 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
 1181                         if (sc->mfi_cmd_pool_tbolt != NULL) {
 1182                                 if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
 1183                                         free(sc->mfi_cmd_pool_tbolt[i],
 1184                                             M_MFIBUF);
 1185                                         sc->mfi_cmd_pool_tbolt[i] = NULL;
 1186                                 }
 1187                         }
 1188                 }
 1189                 if (sc->mfi_cmd_pool_tbolt != NULL) {
 1190                         free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
 1191                         sc->mfi_cmd_pool_tbolt = NULL;
 1192                 }
 1193                 if (sc->request_desc_pool != NULL) {
 1194                         free(sc->request_desc_pool, M_MFIBUF);
 1195                         sc->request_desc_pool = NULL;
 1196                 }
 1197         }
 1198         if (sc->mfi_buffer_dmat != NULL)
 1199                 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
 1200         if (sc->mfi_parent_dmat != NULL)
 1201                 bus_dma_tag_destroy(sc->mfi_parent_dmat);
 1202 
 1203         if (mtx_initialized(&sc->mfi_io_lock)) {
 1204                 mtx_destroy(&sc->mfi_io_lock);
 1205                 sx_destroy(&sc->mfi_config_lock);
 1206         }
 1207 
 1208         return;
 1209 }
 1210 
 1211 static void
 1212 mfi_startup(void *arg)
 1213 {
 1214         struct mfi_softc *sc;
 1215 
 1216         sc = (struct mfi_softc *)arg;
 1217 
 1218         config_intrhook_disestablish(&sc->mfi_ich);
 1219 
 1220         sc->mfi_enable_intr(sc);
 1221         sx_xlock(&sc->mfi_config_lock);
 1222         mtx_lock(&sc->mfi_io_lock);
 1223         mfi_ldprobe(sc);
 1224         if (sc->mfi_flags & MFI_FLAGS_SKINNY)
 1225             mfi_syspdprobe(sc);
 1226         mtx_unlock(&sc->mfi_io_lock);
 1227         sx_xunlock(&sc->mfi_config_lock);
 1228 }
 1229 
 1230 static void
 1231 mfi_intr(void *arg)
 1232 {
 1233         struct mfi_softc *sc;
 1234         struct mfi_command *cm;
 1235         uint32_t pi, ci, context;
 1236 
 1237         sc = (struct mfi_softc *)arg;
 1238 
 1239         if (sc->mfi_check_clear_intr(sc))
 1240                 return;
 1241 
 1242 restart:
 1243         pi = sc->mfi_comms->hw_pi;
 1244         ci = sc->mfi_comms->hw_ci;
 1245         mtx_lock(&sc->mfi_io_lock);
 1246         while (ci != pi) {
 1247                 context = sc->mfi_comms->hw_reply_q[ci];
 1248                 if (context < sc->mfi_max_fw_cmds) {
 1249                         cm = &sc->mfi_commands[context];
 1250                         mfi_remove_busy(cm);
 1251                         cm->cm_error = 0;
 1252                         mfi_complete(sc, cm);
 1253                 }
 1254                 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
 1255                         ci = 0;
 1256                 }
 1257         }
 1258 
 1259         sc->mfi_comms->hw_ci = ci;
 1260 
 1261         /* Give deferred I/O a chance to run */
 1262         if (sc->mfi_flags & MFI_FLAGS_QFRZN)
 1263                 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
 1264         mfi_startio(sc);
 1265         mtx_unlock(&sc->mfi_io_lock);
 1266 
 1267         /*
 1268          * Dummy read to flush the bus; this ensures that the indexes are up
 1269          * to date.  Restart processing if more commands have come in.
 1270          */
 1271         (void)sc->mfi_read_fw_status(sc);
 1272         if (pi != sc->mfi_comms->hw_pi)
 1273                 goto restart;
 1274 
 1275         return;
 1276 }
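
/*
 * Editorial sketch (not part of the driver): the reply-queue walk in
 * mfi_intr() above, reduced to its index arithmetic.  The firmware
 * advances the producer index (hw_pi) as commands complete; the driver
 * consumes contexts and advances the consumer index (hw_ci), wrapping
 * after max_fw_cmds + 1 slots.  All example_* names are hypothetical.
 */
#include <stdint.h>

struct example_reply_queue {
        uint32_t hw_pi;         /* producer index, advanced by firmware */
        uint32_t hw_ci;         /* consumer index, advanced by the driver */
        uint32_t max_fw_cmds;   /* the ring holds max_fw_cmds + 1 entries */
        uint32_t *hw_reply_q;   /* completed command contexts */
};

static void
example_drain_replies(struct example_reply_queue *q,
    void (*complete)(uint32_t context))
{
        uint32_t pi = q->hw_pi;
        uint32_t ci = q->hw_ci;

        while (ci != pi) {
                uint32_t context = q->hw_reply_q[ci];

                /* Out-of-range contexts are skipped, as in mfi_intr(). */
                if (context < q->max_fw_cmds)
                        complete(context);
                if (++ci == q->max_fw_cmds + 1)
                        ci = 0;
        }
        q->hw_ci = ci;          /* publish the new consumer index */
}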
 1277 
 1278 int
 1279 mfi_shutdown(struct mfi_softc *sc)
 1280 {
 1281         struct mfi_dcmd_frame *dcmd;
 1282         struct mfi_command *cm;
 1283         int error;
 1284 
 1285 
 1286         if (sc->mfi_aen_cm != NULL) {
 1287                 sc->cm_aen_abort = 1;
 1288                 mfi_abort(sc, &sc->mfi_aen_cm);
 1289         }
 1290 
 1291         if (sc->mfi_map_sync_cm != NULL) {
 1292                 sc->cm_map_abort = 1;
 1293                 mfi_abort(sc, &sc->mfi_map_sync_cm);
 1294         }
 1295 
 1296         mtx_lock(&sc->mfi_io_lock);
 1297         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
 1298         if (error) {
 1299                 mtx_unlock(&sc->mfi_io_lock);
 1300                 return (error);
 1301         }
 1302 
 1303         dcmd = &cm->cm_frame->dcmd;
 1304         dcmd->header.flags = MFI_FRAME_DIR_NONE;
 1305         cm->cm_flags = MFI_CMD_POLLED;
 1306         cm->cm_data = NULL;
 1307 
 1308         if ((error = mfi_mapcmd(sc, cm)) != 0) {
 1309                 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
 1310         }
 1311 
 1312         mfi_release_command(cm);
 1313         mtx_unlock(&sc->mfi_io_lock);
 1314         return (error);
 1315 }
 1316 
 1317 static void
 1318 mfi_syspdprobe(struct mfi_softc *sc)
 1319 {
 1320         struct mfi_frame_header *hdr;
 1321         struct mfi_command *cm = NULL;
 1322         struct mfi_pd_list *pdlist = NULL;
 1323         struct mfi_system_pd *syspd, *tmp;
 1324         struct mfi_system_pending *syspd_pend;
 1325         int error, i, found;
 1326 
 1327         sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
 1328         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1329         /* Add SYSTEM PD's */
 1330         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
 1331             (void **)&pdlist, sizeof(*pdlist));
 1332         if (error) {
 1333                 device_printf(sc->mfi_dev,
 1334                     "Error while forming SYSTEM PD list\n");
 1335                 goto out;
 1336         }
 1337 
 1338         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 1339         cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
 1340         cm->cm_frame->dcmd.mbox[1] = 0;
 1341         if (mfi_mapcmd(sc, cm) != 0) {
 1342                 device_printf(sc->mfi_dev,
 1343                     "Failed to get syspd device listing\n");
 1344                 goto out;
 1345         }
 1346         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1347             BUS_DMASYNC_POSTREAD);
 1348         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1349         hdr = &cm->cm_frame->header;
 1350         if (hdr->cmd_status != MFI_STAT_OK) {
 1351                 device_printf(sc->mfi_dev,
 1352                     "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
 1353                 goto out;
 1354         }
 1355         /* Get each PD and add it to the system */
 1356         for (i = 0; i < pdlist->count; i++) {
 1357                 if (pdlist->addr[i].device_id ==
 1358                     pdlist->addr[i].encl_device_id)
 1359                         continue;
 1360                 found = 0;
 1361                 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
 1362                         if (syspd->pd_id == pdlist->addr[i].device_id)
 1363                                 found = 1;
 1364                 }
 1365                 TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
 1366                         if (syspd_pend->pd_id == pdlist->addr[i].device_id)
 1367                                 found = 1;
 1368                 }
 1369                 if (found == 0)
 1370                         mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
 1371         }
 1372         /* Delete SYSPDs whose state has changed */
 1373         TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
 1374                 found = 0;
 1375                 for (i = 0; i < pdlist->count; i++) {
 1376                         if (syspd->pd_id == pdlist->addr[i].device_id)
 1377                                 found = 1;
 1378                 }
 1379                 if (found == 0) {
 1380                         printf("DELETE\n");
 1381                         mtx_unlock(&sc->mfi_io_lock);
 1382                         mtx_lock(&Giant);
 1383                         device_delete_child(sc->mfi_dev, syspd->pd_dev);
 1384                         mtx_unlock(&Giant);
 1385                         mtx_lock(&sc->mfi_io_lock);
 1386                 }
 1387         }
 1388 out:
 1389         if (pdlist)
 1390             free(pdlist, M_MFIBUF);
 1391         if (cm)
 1392             mfi_release_command(cm);
 1393 
 1394         return;
 1395 }
 1396 
 1397 static void
 1398 mfi_ldprobe(struct mfi_softc *sc)
 1399 {
 1400         struct mfi_frame_header *hdr;
 1401         struct mfi_command *cm = NULL;
 1402         struct mfi_ld_list *list = NULL;
 1403         struct mfi_disk *ld;
 1404         struct mfi_disk_pending *ld_pend;
 1405         int error, i;
 1406 
 1407         sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
 1408         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1409 
 1410         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
 1411             (void **)&list, sizeof(*list));
 1412         if (error)
 1413                 goto out;
 1414 
 1415         cm->cm_flags = MFI_CMD_DATAIN;
 1416         if (mfi_wait_command(sc, cm) != 0) {
 1417                 device_printf(sc->mfi_dev, "Failed to get device listing\n");
 1418                 goto out;
 1419         }
 1420 
 1421         hdr = &cm->cm_frame->header;
 1422         if (hdr->cmd_status != MFI_STAT_OK) {
 1423                 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
 1424                     hdr->cmd_status);
 1425                 goto out;
 1426         }
 1427 
 1428         for (i = 0; i < list->ld_count; i++) {
 1429                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1430                         if (ld->ld_id == list->ld_list[i].ld.v.target_id)
 1431                                 goto skip_add;
 1432                 }
 1433                 TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
 1434                         if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
 1435                                 goto skip_add;
 1436                 }
 1437                 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
 1438         skip_add:;
 1439         }
 1440 out:
 1441         if (list)
 1442                 free(list, M_MFIBUF);
 1443         if (cm)
 1444                 mfi_release_command(cm);
 1445 
 1446         return;
 1447 }
 1448 
 1449 /*
 1450  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 1451  * the bits in 24-31 are all set, then it is the number of seconds since
 1452  * boot.
 1453  */
 1454 static const char *
 1455 format_timestamp(uint32_t timestamp)
 1456 {
 1457         static char buffer[32];
 1458 
 1459         if ((timestamp & 0xff000000) == 0xff000000)
 1460                 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
 1461                     0x00ffffff);
 1462         else
 1463                 snprintf(buffer, sizeof(buffer), "%us", timestamp);
 1464         return (buffer);
 1465 }
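
/*
 * Editorial sketch (not part of the driver): converting the event
 * timestamp described above to Unix time.  Values with bits 24-31 all
 * set count seconds since boot and cannot be converted this way.  The
 * constant is the offset between the Unix epoch (1970-01-01) and the
 * controller epoch (2000-01-01); example_* names are hypothetical.
 */
#include <stdint.h>
#include <time.h>

#define EXAMPLE_MFI_EPOCH_OFFSET        946684800u      /* 1970 -> 2000 */

static int
example_timestamp_to_unix(uint32_t timestamp, time_t *out)
{
        if ((timestamp & 0xff000000) == 0xff000000)
                return (-1);    /* relative to boot, not to the epoch */
        *out = (time_t)timestamp + EXAMPLE_MFI_EPOCH_OFFSET;
        return (0);
}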
 1466 
 1467 static const char *
 1468 format_class(int8_t class)
 1469 {
 1470         static char buffer[6];
 1471 
 1472         switch (class) {
 1473         case MFI_EVT_CLASS_DEBUG:
 1474                 return ("debug");
 1475         case MFI_EVT_CLASS_PROGRESS:
 1476                 return ("progress");
 1477         case MFI_EVT_CLASS_INFO:
 1478                 return ("info");
 1479         case MFI_EVT_CLASS_WARNING:
 1480                 return ("WARN");
 1481         case MFI_EVT_CLASS_CRITICAL:
 1482                 return ("CRIT");
 1483         case MFI_EVT_CLASS_FATAL:
 1484                 return ("FATAL");
 1485         case MFI_EVT_CLASS_DEAD:
 1486                 return ("DEAD");
 1487         default:
 1488                 snprintf(buffer, sizeof(buffer), "%d", class);
 1489                 return (buffer);
 1490         }
 1491 }
 1492 
 1493 static void
 1494 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
 1495 {
 1496         struct mfi_system_pd *syspd = NULL;
 1497 
 1498         device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
 1499             format_timestamp(detail->time), detail->evt_class.members.locale,
 1500             format_class(detail->evt_class.members.evt_class),
 1501             detail->description);
 1502 
 1503         /* Don't act on old AEN's or while shutting down */
 1504         if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
 1505                 return;
 1506 
 1507         switch (detail->arg_type) {
 1508         case MR_EVT_ARGS_NONE:
 1509                 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
 1510                         device_printf(sc->mfi_dev, "HostBus scan raised\n");
 1511                         if (mfi_detect_jbod_change) {
 1512                                 /*
 1513                                  * Probe for new SYSPDs and delete
 1514                                  * invalid ones.
 1515                                  */
 1516                                 sx_xlock(&sc->mfi_config_lock);
 1517                                 mtx_lock(&sc->mfi_io_lock);
 1518                                 mfi_syspdprobe(sc);
 1519                                 mtx_unlock(&sc->mfi_io_lock);
 1520                                 sx_xunlock(&sc->mfi_config_lock);
 1521                         }
 1522                 }
 1523                 break;
 1524         case MR_EVT_ARGS_LD_STATE:
 1525                 /* At load time the driver reads all events starting
 1526                  * from the one logged after the last shutdown.  Ignore
 1527                  * these old events.
 1528                  */
 1529                 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
 1530                         /* Remove the LD */
 1531                         struct mfi_disk *ld;
 1532                         TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1533                                 if (ld->ld_id ==
 1534                                     detail->args.ld_state.ld.target_id)
 1535                                         break;
 1536                         }
 1537                         /*
 1538                          * Fix for kernel panics when an SSCD is removed:
 1539                          * KASSERT(ld != NULL, ("volume disappeared"));
 1540                          */
 1541                         if (ld != NULL) {
 1542                                 mtx_lock(&Giant);
 1543                                 device_delete_child(sc->mfi_dev, ld->ld_dev);
 1544                                 mtx_unlock(&Giant);
 1545                         }
 1546                 }
 1547                 break;
 1548         case MR_EVT_ARGS_PD:
 1549                 if (detail->code == MR_EVT_PD_REMOVED) {
 1550                         if (mfi_detect_jbod_change) {
 1551                                 /*
 1552                                  * If the removed device is a SYSPD then
 1553                                  * delete it
 1554                                  */
 1555                                 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
 1556                                     pd_link) {
 1557                                         if (syspd->pd_id ==
 1558                                             detail->args.pd.device_id) {
 1559                                                 mtx_lock(&Giant);
 1560                                                 device_delete_child(
 1561                                                     sc->mfi_dev,
 1562                                                     syspd->pd_dev);
 1563                                                 mtx_unlock(&Giant);
 1564                                                 break;
 1565                                         }
 1566                                 }
 1567                         }
 1568                 }
 1569                 if (detail->code == MR_EVT_PD_INSERTED) {
 1570                         if (mfi_detect_jbod_change) {
 1571                                 /* Probe for new SYSPD's */
 1572                                 sx_xlock(&sc->mfi_config_lock);
 1573                                 mtx_lock(&sc->mfi_io_lock);
 1574                                 mfi_syspdprobe(sc);
 1575                                 mtx_unlock(&sc->mfi_io_lock);
 1576                                 sx_xunlock(&sc->mfi_config_lock);
 1577                         }
 1578                 }
 1579                 break;
 1580         }
 1581 }
 1582 
 1583 static void
 1584 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
 1585 {
 1586         struct mfi_evt_queue_elm *elm;
 1587 
 1588         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1589         elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
 1590         if (elm == NULL)
 1591                 return;
 1592         memcpy(&elm->detail, detail, sizeof(*detail));
 1593         TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
 1594         taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
 1595 }
 1596 
 1597 static void
 1598 mfi_handle_evt(void *context, int pending)
 1599 {
 1600         TAILQ_HEAD(,mfi_evt_queue_elm) queue;
 1601         struct mfi_softc *sc;
 1602         struct mfi_evt_queue_elm *elm;
 1603 
 1604         sc = context;
 1605         TAILQ_INIT(&queue);
 1606         mtx_lock(&sc->mfi_io_lock);
 1607         TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
 1608         mtx_unlock(&sc->mfi_io_lock);
 1609         while ((elm = TAILQ_FIRST(&queue)) != NULL) {
 1610                 TAILQ_REMOVE(&queue, elm, link);
 1611                 mfi_decode_evt(sc, &elm->detail);
 1612                 free(elm, M_MFIBUF);
 1613         }
 1614 }
 1615 
 1616 static int
 1617 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
 1618 {
 1619         struct mfi_command *cm;
 1620         struct mfi_dcmd_frame *dcmd;
 1621         union mfi_evt current_aen, prior_aen;
 1622         struct mfi_evt_detail *ed = NULL;
 1623         int error = 0;
 1624 
 1625         current_aen.word = locale;
 1626         if (sc->mfi_aen_cm != NULL) {
 1627                 prior_aen.word =
 1628                     ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
 1629                 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
 1630                     !((prior_aen.members.locale & current_aen.members.locale)
 1631                     ^current_aen.members.locale)) {
 1632                         return (0);
 1633                 } else {
 1634                         prior_aen.members.locale |= current_aen.members.locale;
 1635                         if (prior_aen.members.evt_class
 1636                             < current_aen.members.evt_class)
 1637                                 current_aen.members.evt_class =
 1638                                     prior_aen.members.evt_class;
 1639                         mfi_abort(sc, &sc->mfi_aen_cm);
 1640                 }
 1641         }
 1642 
 1643         mtx_lock(&sc->mfi_io_lock);
 1644         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
 1645             (void **)&ed, sizeof(*ed));
 1646         mtx_unlock(&sc->mfi_io_lock);
 1647         if (error) {
 1648                 goto out;
 1649         }
 1650 
 1651         dcmd = &cm->cm_frame->dcmd;
 1652         ((uint32_t *)&dcmd->mbox)[0] = seq;
 1653         ((uint32_t *)&dcmd->mbox)[1] = locale;
 1654         cm->cm_flags = MFI_CMD_DATAIN;
 1655         cm->cm_complete = mfi_aen_complete;
 1656 
 1657         sc->last_seq_num = seq;
 1658         sc->mfi_aen_cm = cm;
 1659 
 1660         mtx_lock(&sc->mfi_io_lock);
 1661         mfi_enqueue_ready(cm);
 1662         mfi_startio(sc);
 1663         mtx_unlock(&sc->mfi_io_lock);
 1664 
 1665 out:
 1666         return (error);
 1667 }
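
/*
 * Editorial sketch (not part of the driver): the class/locale check in
 * mfi_aen_register() above.  If the AEN already registered covers every
 * requested locale bit at an equal or more verbose class (lower value),
 * nothing needs to change; otherwise the locales are merged and the
 * more verbose class wins before the old AEN is aborted and
 * re-registered.  example_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_aen {
        uint16_t locale;        /* bitmask of interesting event locales */
        int8_t   evt_class;     /* lower value == more verbose */
};

static bool
example_aen_needs_reregister(struct example_aen *prior,
    const struct example_aen *req)
{
        if (prior->evt_class <= req->evt_class &&
            (prior->locale & req->locale) == req->locale)
                return (false);                 /* already covered */
        prior->locale |= req->locale;
        if (prior->evt_class > req->evt_class)
                prior->evt_class = req->evt_class;
        return (true);                          /* abort and re-register */
}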
 1668 
 1669 static void
 1670 mfi_aen_complete(struct mfi_command *cm)
 1671 {
 1672         struct mfi_frame_header *hdr;
 1673         struct mfi_softc *sc;
 1674         struct mfi_evt_detail *detail;
 1675         struct mfi_aen *mfi_aen_entry, *tmp;
 1676         int seq = 0, aborted = 0;
 1677 
 1678         sc = cm->cm_sc;
 1679         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1680 
 1681         hdr = &cm->cm_frame->header;
 1682 
 1683         if (sc->mfi_aen_cm == NULL)
 1684                 return;
 1685 
 1686         if (sc->cm_aen_abort ||
 1687             hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 1688                 sc->cm_aen_abort = 0;
 1689                 aborted = 1;
 1690         } else {
 1691                 sc->mfi_aen_triggered = 1;
 1692                 if (sc->mfi_poll_waiting) {
 1693                         sc->mfi_poll_waiting = 0;
 1694                         selwakeup(&sc->mfi_select);
 1695                 }
 1696                 detail = cm->cm_data;
 1697                 mfi_queue_evt(sc, detail);
 1698                 seq = detail->seq + 1;
 1699                 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
 1700                     tmp) {
 1701                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 1702                             aen_link);
 1703                         PROC_LOCK(mfi_aen_entry->p);
 1704                         kern_psignal(mfi_aen_entry->p, SIGIO);
 1705                         PROC_UNLOCK(mfi_aen_entry->p);
 1706                         free(mfi_aen_entry, M_MFIBUF);
 1707                 }
 1708         }
 1709 
 1710         free(cm->cm_data, M_MFIBUF);
 1711         sc->mfi_aen_cm = NULL;
 1712         wakeup(&sc->mfi_aen_cm);
 1713         mfi_release_command(cm);
 1714 
 1715         /* set it up again so the driver can catch more events */
 1716         if (!aborted) {
 1717                 mtx_unlock(&sc->mfi_io_lock);
 1718                 mfi_aen_setup(sc, seq);
 1719                 mtx_lock(&sc->mfi_io_lock);
 1720         }
 1721 }
 1722 
 1723 #define MAX_EVENTS 15
 1724 
 1725 static int
 1726 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
 1727 {
 1728         struct mfi_command *cm;
 1729         struct mfi_dcmd_frame *dcmd;
 1730         struct mfi_evt_list *el;
 1731         union mfi_evt class_locale;
 1732         int error, i, seq, size;
 1733 
 1734         class_locale.members.reserved = 0;
 1735         class_locale.members.locale = mfi_event_locale;
 1736         class_locale.members.evt_class  = mfi_event_class;
 1737 
 1738         size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
 1739                 * (MAX_EVENTS - 1);
 1740         el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
 1741         if (el == NULL)
 1742                 return (ENOMEM);
 1743 
 1744         for (seq = start_seq;;) {
 1745                 mtx_lock(&sc->mfi_io_lock);
 1746                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 1747                         free(el, M_MFIBUF);
 1748                         mtx_unlock(&sc->mfi_io_lock);
 1749                         return (EBUSY);
 1750                 }
 1751                 mtx_unlock(&sc->mfi_io_lock);
 1752 
 1753                 dcmd = &cm->cm_frame->dcmd;
 1754                 bzero(dcmd->mbox, MFI_MBOX_SIZE);
 1755                 dcmd->header.cmd = MFI_CMD_DCMD;
 1756                 dcmd->header.timeout = 0;
 1757                 dcmd->header.data_len = size;
 1758                 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
 1759                 ((uint32_t *)&dcmd->mbox)[0] = seq;
 1760                 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
 1761                 cm->cm_sg = &dcmd->sgl;
 1762                 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 1763                 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 1764                 cm->cm_data = el;
 1765                 cm->cm_len = size;
 1766 
 1767                 mtx_lock(&sc->mfi_io_lock);
 1768                 if ((error = mfi_mapcmd(sc, cm)) != 0) {
 1769                         device_printf(sc->mfi_dev,
 1770                             "Failed to get controller entries\n");
 1771                         mfi_release_command(cm);
 1772                         mtx_unlock(&sc->mfi_io_lock);
 1773                         break;
 1774                 }
 1775 
 1776                 mtx_unlock(&sc->mfi_io_lock);
 1777                 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1778                     BUS_DMASYNC_POSTREAD);
 1779                 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1780 
 1781                 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
 1782                         mtx_lock(&sc->mfi_io_lock);
 1783                         mfi_release_command(cm);
 1784                         mtx_unlock(&sc->mfi_io_lock);
 1785                         break;
 1786                 }
 1787                 if (dcmd->header.cmd_status != MFI_STAT_OK) {
 1788                         device_printf(sc->mfi_dev,
 1789                             "Error %d fetching controller entries\n",
 1790                             dcmd->header.cmd_status);
 1791                         mtx_lock(&sc->mfi_io_lock);
 1792                         mfi_release_command(cm);
 1793                         mtx_unlock(&sc->mfi_io_lock);
 1794                         break;
 1795                 }
 1796                 mtx_lock(&sc->mfi_io_lock);
 1797                 mfi_release_command(cm);
 1798                 mtx_unlock(&sc->mfi_io_lock);
 1799 
 1800                 for (i = 0; i < el->count; i++) {
 1801                         /*
 1802                          * If this event is newer than 'stop_seq' then
 1803                          * break out of the loop.  Note that the log
 1804                          * is a circular buffer so we have to handle
 1805                          * the case that our stop point is earlier in
 1806                          * the buffer than our start point.
 1807                          */
 1808                         if (el->event[i].seq >= stop_seq) {
 1809                                 if (start_seq <= stop_seq)
 1810                                         break;
 1811                                 else if (el->event[i].seq < start_seq)
 1812                                         break;
 1813                         }
 1814                         mtx_lock(&sc->mfi_io_lock);
 1815                         mfi_queue_evt(sc, &el->event[i]);
 1816                         mtx_unlock(&sc->mfi_io_lock);
 1817                 }
 1818                 seq = el->event[el->count - 1].seq + 1;
 1819         }
 1820 
 1821         free(el, M_MFIBUF);
 1822         return (0);
 1823 }
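
/*
 * Editorial sketch (not part of the driver): the stop condition used in
 * the loop above.  Because the controller's event log is circular, a
 * wrapping range (start_seq > stop_seq) only ends once an event is both
 * at or past stop_seq and strictly before start_seq.  example_* names
 * are hypothetical.
 */
#include <stdint.h>

static int
example_event_past_range(uint32_t seq, uint32_t start_seq, uint32_t stop_seq)
{
        if (seq < stop_seq)
                return (0);             /* still inside the requested range */
        if (start_seq <= stop_seq)
                return (1);             /* non-wrapping range: done */
        return (seq < start_seq);       /* wrapping range: done only in the
                                           gap between stop and start */
}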
 1824 
 1825 static int
 1826 mfi_add_ld(struct mfi_softc *sc, int id)
 1827 {
 1828         struct mfi_command *cm;
 1829         struct mfi_dcmd_frame *dcmd = NULL;
 1830         struct mfi_ld_info *ld_info = NULL;
 1831         struct mfi_disk_pending *ld_pend;
 1832         int error;
 1833 
 1834         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1835 
 1836         ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
 1837         if (ld_pend != NULL) {
 1838                 ld_pend->ld_id = id;
 1839                 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
 1840         }
 1841 
 1842         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
 1843             (void **)&ld_info, sizeof(*ld_info));
 1844         if (error) {
 1845                 device_printf(sc->mfi_dev,
 1846                     "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
 1847                 if (ld_info)
 1848                         free(ld_info, M_MFIBUF);
 1849                 return (error);
 1850         }
 1851         cm->cm_flags = MFI_CMD_DATAIN;
 1852         dcmd = &cm->cm_frame->dcmd;
 1853         dcmd->mbox[0] = id;
 1854         if (mfi_wait_command(sc, cm) != 0) {
 1855                 device_printf(sc->mfi_dev,
 1856                     "Failed to get logical drive: %d\n", id);
 1857                 free(ld_info, M_MFIBUF);
 1858                 return (0);
 1859         }
 1860         if (ld_info->ld_config.params.isSSCD != 1)
 1861                 mfi_add_ld_complete(cm);
 1862         else {
 1863                 mfi_release_command(cm);
 1864                 if (ld_info)            /* for SSCD drives, free ld_info here */
 1865                         free(ld_info, M_MFIBUF);
 1866         }
 1867         return (0);
 1868 }
 1869 
 1870 static void
 1871 mfi_add_ld_complete(struct mfi_command *cm)
 1872 {
 1873         struct mfi_frame_header *hdr;
 1874         struct mfi_ld_info *ld_info;
 1875         struct mfi_softc *sc;
 1876         device_t child;
 1877 
 1878         sc = cm->cm_sc;
 1879         hdr = &cm->cm_frame->header;
 1880         ld_info = cm->cm_private;
 1881 
 1882         if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
 1883                 free(ld_info, M_MFIBUF);
 1884                 wakeup(&sc->mfi_map_sync_cm);
 1885                 mfi_release_command(cm);
 1886                 return;
 1887         }
 1888         wakeup(&sc->mfi_map_sync_cm);
 1889         mfi_release_command(cm);
 1890 
 1891         mtx_unlock(&sc->mfi_io_lock);
 1892         mtx_lock(&Giant);
 1893         if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
 1894                 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
 1895                 free(ld_info, M_MFIBUF);
 1896                 mtx_unlock(&Giant);
 1897                 mtx_lock(&sc->mfi_io_lock);
 1898                 return;
 1899         }
 1900 
 1901         device_set_ivars(child, ld_info);
 1902         device_set_desc(child, "MFI Logical Disk");
 1903         bus_generic_attach(sc->mfi_dev);
 1904         mtx_unlock(&Giant);
 1905         mtx_lock(&sc->mfi_io_lock);
 1906 }
 1907 
 1908 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
 1909 {
 1910         struct mfi_command *cm;
 1911         struct mfi_dcmd_frame *dcmd = NULL;
 1912         struct mfi_pd_info *pd_info = NULL;
 1913         struct mfi_system_pending *syspd_pend;
 1914         int error;
 1915 
 1916         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1917 
 1918         syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
 1919         if (syspd_pend != NULL) {
 1920                 syspd_pend->pd_id = id;
 1921                 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
 1922         }
 1923 
 1924         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
 1925                 (void **)&pd_info, sizeof(*pd_info));
 1926         if (error) {
 1927                 device_printf(sc->mfi_dev,
 1928                     "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
 1929                     error);
 1930                 if (pd_info)
 1931                         free(pd_info, M_MFIBUF);
 1932                 return (error);
 1933         }
 1934         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 1935         dcmd = &cm->cm_frame->dcmd;
 1936         dcmd->mbox[0] = id;
 1937         dcmd->header.scsi_status = 0;
 1938         dcmd->header.pad0 = 0;
 1939         if (mfi_mapcmd(sc, cm) != 0) {
 1940                 device_printf(sc->mfi_dev,
 1941                     "Failed to get physical drive info %d\n", id);
 1942                 free(pd_info, M_MFIBUF);
 1943                 return (0);
 1944         }
 1945         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1946             BUS_DMASYNC_POSTREAD);
 1947         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1948         mfi_add_sys_pd_complete(cm);
 1949         return (0);
 1950 }
 1951 
 1952 static void
 1953 mfi_add_sys_pd_complete(struct mfi_command *cm)
 1954 {
 1955         struct mfi_frame_header *hdr;
 1956         struct mfi_pd_info *pd_info;
 1957         struct mfi_softc *sc;
 1958         device_t child;
 1959 
 1960         sc = cm->cm_sc;
 1961         hdr = &cm->cm_frame->header;
 1962         pd_info = cm->cm_private;
 1963 
 1964         if (hdr->cmd_status != MFI_STAT_OK) {
 1965                 free(pd_info, M_MFIBUF);
 1966                 mfi_release_command(cm);
 1967                 return;
 1968         }
 1969         if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
 1970                 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
 1971                     pd_info->ref.v.device_id);
 1972                 free(pd_info, M_MFIBUF);
 1973                 mfi_release_command(cm);
 1974                 return;
 1975         }
 1976         mfi_release_command(cm);
 1977 
 1978         mtx_unlock(&sc->mfi_io_lock);
 1979         mtx_lock(&Giant);
 1980         if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
 1981                 device_printf(sc->mfi_dev, "Failed to add system pd\n");
 1982                 free(pd_info, M_MFIBUF);
 1983                 mtx_unlock(&Giant);
 1984                 mtx_lock(&sc->mfi_io_lock);
 1985                 return;
 1986         }
 1987 
 1988         device_set_ivars(child, pd_info);
 1989         device_set_desc(child, "MFI System PD");
 1990         bus_generic_attach(sc->mfi_dev);
 1991         mtx_unlock(&Giant);
 1992         mtx_lock(&sc->mfi_io_lock);
 1993 }
 1994 
 1995 static struct mfi_command *
 1996 mfi_bio_command(struct mfi_softc *sc)
 1997 {
 1998         struct bio *bio;
 1999         struct mfi_command *cm = NULL;
 2000 
 2001         /* Reserve two commands to avoid starving ioctl requests */
 2002         if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
 2003                 return (NULL);
 2004         }
 2005         if ((bio = mfi_dequeue_bio(sc)) == NULL) {
 2006                 return (NULL);
 2007         }
 2008         if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
 2009                 cm = mfi_build_ldio(sc, bio);
 2010         } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
 2011                 cm = mfi_build_syspdio(sc, bio);
 2012         }
 2013         if (!cm)
 2014             mfi_enqueue_bio(sc, bio);
 2015         return cm;
 2016 }
 2017 
 2018 /*
 2019  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
 2020  */
 2021 
 2022 int
 2023 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
 2024 {
 2025         int cdb_len;
 2026 
 2027         if (((lba & 0x1fffff) == lba)
 2028          && ((block_count & 0xff) == block_count)
 2029          && (byte2 == 0)) {
 2030                 /* We can fit in a 6 byte cdb */
 2031                 struct scsi_rw_6 *scsi_cmd;
 2032 
 2033                 scsi_cmd = (struct scsi_rw_6 *)cdb;
 2034                 scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
 2035                 scsi_ulto3b(lba, scsi_cmd->addr);
 2036                 scsi_cmd->length = block_count & 0xff;
 2037                 scsi_cmd->control = 0;
 2038                 cdb_len = sizeof(*scsi_cmd);
 2039         } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
 2040                 /* Need a 10 byte CDB */
 2041                 struct scsi_rw_10 *scsi_cmd;
 2042 
 2043                 scsi_cmd = (struct scsi_rw_10 *)cdb;
 2044                 scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
 2045                 scsi_cmd->byte2 = byte2;
 2046                 scsi_ulto4b(lba, scsi_cmd->addr);
 2047                 scsi_cmd->reserved = 0;
 2048                 scsi_ulto2b(block_count, scsi_cmd->length);
 2049                 scsi_cmd->control = 0;
 2050                 cdb_len = sizeof(*scsi_cmd);
 2051         } else if (((block_count & 0xffffffff) == block_count) &&
 2052             ((lba & 0xffffffff) == lba)) {
 2053                 /* Block count is too big for a 10 byte CDB; use a 12 byte CDB */
 2054                 struct scsi_rw_12 *scsi_cmd;
 2055 
 2056                 scsi_cmd = (struct scsi_rw_12 *)cdb;
 2057                 scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
 2058                 scsi_cmd->byte2 = byte2;
 2059                 scsi_ulto4b(lba, scsi_cmd->addr);
 2060                 scsi_cmd->reserved = 0;
 2061                 scsi_ulto4b(block_count, scsi_cmd->length);
 2062                 scsi_cmd->control = 0;
 2063                 cdb_len = sizeof(*scsi_cmd);
 2064         } else {
 2065                 /*
 2066                  * 16 byte CDB.  We'll only get here if the LBA is larger
 2067                  * than 2^32
 2068                  */
 2069                 struct scsi_rw_16 *scsi_cmd;
 2070 
 2071                 scsi_cmd = (struct scsi_rw_16 *)cdb;
 2072                 scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
 2073                 scsi_cmd->byte2 = byte2;
 2074                 scsi_u64to8b(lba, scsi_cmd->addr);
 2075                 scsi_cmd->reserved = 0;
 2076                 scsi_ulto4b(block_count, scsi_cmd->length);
 2077                 scsi_cmd->control = 0;
 2078                 cdb_len = sizeof(*scsi_cmd);
 2079         }
 2080 
 2081         return cdb_len;
 2082 }
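
/*
 * Editorial sketch (not part of the driver): the CDB size selection
 * performed by mfi_build_cdb() above, restated as a pure function.
 * block_count is 32 bits wide, so the 12-byte case only needs to check
 * the LBA.  example_* names are hypothetical.
 */
#include <stdint.h>

static int
example_cdb_len(uint64_t lba, uint32_t block_count, uint8_t byte2)
{
        if ((lba & 0x1fffff) == lba && (block_count & 0xff) == block_count &&
            byte2 == 0)
                return (6);     /* READ_6/WRITE_6: 21-bit LBA, 8-bit count */
        if ((block_count & 0xffff) == block_count &&
            (lba & 0xffffffff) == lba)
                return (10);    /* READ_10/WRITE_10: 32-bit LBA, 16-bit count */
        if ((lba & 0xffffffff) == lba)
                return (12);    /* READ_12/WRITE_12: 32-bit LBA, 32-bit count */
        return (16);            /* READ_16/WRITE_16: 64-bit LBA */
}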
 2083 
 2084 static struct mfi_command *
 2085 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
 2086 {
 2087         struct mfi_command *cm;
 2088         struct mfi_pass_frame *pass;
 2089         uint32_t context = 0;
 2090         int flags = 0, blkcount = 0, readop;
 2091         uint8_t cdb_len;
 2092 
 2093         if ((cm = mfi_dequeue_free(sc)) == NULL)
 2094             return (NULL);
 2095 
 2096         /* Zero out the MFI frame */
 2097         context = cm->cm_frame->header.context;
 2098         bzero(cm->cm_frame, sizeof(union mfi_frame));
 2099         cm->cm_frame->header.context = context;
 2100         pass = &cm->cm_frame->pass;
 2101         bzero(pass->cdb, 16);
 2102         pass->header.cmd = MFI_CMD_PD_SCSI_IO;
 2103         switch (bio->bio_cmd & 0x03) {
 2104         case BIO_READ:
 2105                 flags = MFI_CMD_DATAIN;
 2106                 readop = 1;
 2107                 break;
 2108         case BIO_WRITE:
 2109                 flags = MFI_CMD_DATAOUT;
 2110                 readop = 0;
 2111                 break;
 2112         default:
 2113                 /* TODO: what about BIO_DELETE??? */
 2114                 panic("Unsupported bio command %x\n", bio->bio_cmd);
 2115         }
 2116 
 2117         /* Cheat with the sector length to avoid a non-constant division */
 2118         blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
 2119         /* Fill the LBA and Transfer length in CDB */
 2120         cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
 2121             pass->cdb);
 2122         pass->header.target_id = (uintptr_t)bio->bio_driver1;
 2123         pass->header.lun_id = 0;
 2124         pass->header.timeout = 0;
 2125         pass->header.flags = 0;
 2126         pass->header.scsi_status = 0;
 2127         pass->header.sense_len = MFI_SENSE_LEN;
 2128         pass->header.data_len = bio->bio_bcount;
 2129         pass->header.cdb_len = cdb_len;
 2130         pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
 2131         pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 2132         cm->cm_complete = mfi_bio_complete;
 2133         cm->cm_private = bio;
 2134         cm->cm_data = bio->bio_data;
 2135         cm->cm_len = bio->bio_bcount;
 2136         cm->cm_sg = &pass->sgl;
 2137         cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
 2138         cm->cm_flags = flags;
 2139         return (cm);
 2140 }
 2141 
 2142 static struct mfi_command *
 2143 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
 2144 {
 2145         struct mfi_io_frame *io;
 2146         struct mfi_command *cm;
 2147         int flags;
 2148         uint32_t blkcount;
 2149         uint32_t context = 0;
 2150 
 2151         if ((cm = mfi_dequeue_free(sc)) == NULL)
 2152             return (NULL);
 2153 
 2154         /* Zero out the MFI frame */
 2155         context = cm->cm_frame->header.context;
 2156         bzero(cm->cm_frame, sizeof(union mfi_frame));
 2157         cm->cm_frame->header.context = context;
 2158         io = &cm->cm_frame->io;
 2159         switch (bio->bio_cmd & 0x03) {
 2160         case BIO_READ:
 2161                 io->header.cmd = MFI_CMD_LD_READ;
 2162                 flags = MFI_CMD_DATAIN;
 2163                 break;
 2164         case BIO_WRITE:
 2165                 io->header.cmd = MFI_CMD_LD_WRITE;
 2166                 flags = MFI_CMD_DATAOUT;
 2167                 break;
 2168         default:
 2169                 /* TODO: what about BIO_DELETE??? */
 2170                 panic("Unsupported bio command %x\n", bio->bio_cmd);
 2171         }
 2172 
 2173         /* Cheat with the sector length to avoid a non-constant division */
 2174         blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
 2175         io->header.target_id = (uintptr_t)bio->bio_driver1;
 2176         io->header.timeout = 0;
 2177         io->header.flags = 0;
 2178         io->header.scsi_status = 0;
 2179         io->header.sense_len = MFI_SENSE_LEN;
 2180         io->header.data_len = blkcount;
 2181         io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
 2182         io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 2183         io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
 2184         io->lba_lo = bio->bio_pblkno & 0xffffffff;
 2185         cm->cm_complete = mfi_bio_complete;
 2186         cm->cm_private = bio;
 2187         cm->cm_data = bio->bio_data;
 2188         cm->cm_len = bio->bio_bcount;
 2189         cm->cm_sg = &io->sgl;
 2190         cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
 2191         cm->cm_flags = flags;
 2192         return (cm);
 2193 }
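
/*
 * Editorial sketch (not part of the driver): the block-count rounding
 * and 64-bit LBA split used by mfi_build_ldio() above.  The 512-byte
 * sector size is an assumption standing in for MFI_SECTOR_LEN, and the
 * example_* names are hypothetical.
 */
#include <stdint.h>

#define EXAMPLE_SECTOR_LEN      512u

static void
example_fill_ldio(uint64_t pblkno, uint64_t bcount,
    uint32_t *data_len, uint32_t *lba_hi, uint32_t *lba_lo)
{
        /* Round the byte count up to whole sectors. */
        *data_len = (uint32_t)((bcount + EXAMPLE_SECTOR_LEN - 1) /
            EXAMPLE_SECTOR_LEN);
        /* The MFI I/O frame carries the LBA as two 32-bit halves. */
        *lba_hi = (uint32_t)(pblkno >> 32);
        *lba_lo = (uint32_t)(pblkno & 0xffffffff);
}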
 2194 
 2195 static void
 2196 mfi_bio_complete(struct mfi_command *cm)
 2197 {
 2198         struct bio *bio;
 2199         struct mfi_frame_header *hdr;
 2200         struct mfi_softc *sc;
 2201 
 2202         bio = cm->cm_private;
 2203         hdr = &cm->cm_frame->header;
 2204         sc = cm->cm_sc;
 2205 
 2206         if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
 2207                 bio->bio_flags |= BIO_ERROR;
 2208                 bio->bio_error = EIO;
 2209                 device_printf(sc->mfi_dev, "I/O error, status= %d "
 2210                     "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
 2211                 mfi_print_sense(cm->cm_sc, cm->cm_sense);
 2212         } else if (cm->cm_error != 0) {
 2213                 bio->bio_flags |= BIO_ERROR;
 2214         }
 2215 
 2216         mfi_release_command(cm);
 2217         mfi_disk_complete(bio);
 2218 }
 2219 
 2220 void
 2221 mfi_startio(struct mfi_softc *sc)
 2222 {
 2223         struct mfi_command *cm;
 2224         struct ccb_hdr *ccbh;
 2225 
 2226         for (;;) {
 2227                 /* Don't bother if we're short on resources */
 2228                 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
 2229                         break;
 2230 
 2231                 /* Try a command that has already been prepared */
 2232                 cm = mfi_dequeue_ready(sc);
 2233 
 2234                 if (cm == NULL) {
 2235                         if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
 2236                                 cm = sc->mfi_cam_start(ccbh);
 2237                 }
 2238 
 2239                 /* Nope, so look for work on the bioq */
 2240                 if (cm == NULL)
 2241                         cm = mfi_bio_command(sc);
 2242 
 2243                 /* No work available, so exit */
 2244                 if (cm == NULL)
 2245                         break;
 2246 
 2247                 /* Send the command to the controller */
 2248                 if (mfi_mapcmd(sc, cm) != 0) {
 2249                         mfi_requeue_ready(cm);
 2250                         break;
 2251                 }
 2252         }
 2253 }
 2254 
 2255 int
 2256 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
 2257 {
 2258         int error, polled;
 2259 
 2260         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 2261 
 2262         if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
 2263                 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
 2264                 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
 2265                     cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
 2266                 if (error == EINPROGRESS) {
 2267                         sc->mfi_flags |= MFI_FLAGS_QFRZN;
 2268                         return (0);
 2269                 }
 2270         } else {
 2271                 if (sc->MFA_enabled)
 2272                         error = mfi_tbolt_send_frame(sc, cm);
 2273                 else
 2274                         error = mfi_send_frame(sc, cm);
 2275         }
 2276 
 2277         return (error);
 2278 }
 2279 
 2280 static void
 2281 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 2282 {
 2283         struct mfi_frame_header *hdr;
 2284         struct mfi_command *cm;
 2285         union mfi_sgl *sgl;
 2286         struct mfi_softc *sc;
 2287         int i, j, first, dir;
 2288         int sge_size;
 2289 
 2290         cm = (struct mfi_command *)arg;
 2291         sc = cm->cm_sc;
 2292         hdr = &cm->cm_frame->header;
 2293         sgl = cm->cm_sg;
 2294 
 2295         if (error) {
 2296                 printf("error %d in callback\n", error);
 2297                 cm->cm_error = error;
 2298                 mfi_complete(sc, cm);
 2299                 return;
 2300         }
 2301         /* Use IEEE sgl only for IO's on a SKINNY controller
 2302          * For other commands on a SKINNY controller use either
 2303          * sg32 or sg64 based on the sizeof(bus_addr_t).
 2304          * Also calculate the total frame size based on the type
 2305          * of SGL used.
 2306          */
 2307         if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
 2308             (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
 2309             (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
 2310             (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
 2311                 for (i = 0; i < nsegs; i++) {
 2312                         sgl->sg_skinny[i].addr = segs[i].ds_addr;
 2313                         sgl->sg_skinny[i].len = segs[i].ds_len;
 2314                         sgl->sg_skinny[i].flag = 0;
 2315                 }
 2316                 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
 2317                 sge_size = sizeof(struct mfi_sg_skinny);
 2318                 hdr->sg_count = nsegs;
 2319         } else {
 2320                 j = 0;
 2321                 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
 2322                         first = cm->cm_stp_len;
 2323                         if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
 2324                                 sgl->sg32[j].addr = segs[0].ds_addr;
 2325                                 sgl->sg32[j++].len = first;
 2326                         } else {
 2327                                 sgl->sg64[j].addr = segs[0].ds_addr;
 2328                                 sgl->sg64[j++].len = first;
 2329                         }
 2330                 } else
 2331                         first = 0;
 2332                 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
 2333                         for (i = 0; i < nsegs; i++) {
 2334                                 sgl->sg32[j].addr = segs[i].ds_addr + first;
 2335                                 sgl->sg32[j++].len = segs[i].ds_len - first;
 2336                                 first = 0;
 2337                         }
 2338                 } else {
 2339                         for (i = 0; i < nsegs; i++) {
 2340                                 sgl->sg64[j].addr = segs[i].ds_addr + first;
 2341                                 sgl->sg64[j++].len = segs[i].ds_len - first;
 2342                                 first = 0;
 2343                         }
 2344                         hdr->flags |= MFI_FRAME_SGL64;
 2345                 }
 2346                 hdr->sg_count = j;
 2347                 sge_size = sc->mfi_sge_size;
 2348         }
 2349 
 2350         dir = 0;
 2351         if (cm->cm_flags & MFI_CMD_DATAIN) {
 2352                 dir |= BUS_DMASYNC_PREREAD;
 2353                 hdr->flags |= MFI_FRAME_DIR_READ;
 2354         }
 2355         if (cm->cm_flags & MFI_CMD_DATAOUT) {
 2356                 dir |= BUS_DMASYNC_PREWRITE;
 2357                 hdr->flags |= MFI_FRAME_DIR_WRITE;
 2358         }
 2359         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
 2360         cm->cm_flags |= MFI_CMD_MAPPED;
 2361 
 2362         /*
 2363          * Instead of calculating the total number of frames in the
 2364          * compound frame, it's already assumed that there will be at
 2365          * least 1 frame, so don't compensate for the modulo of the
 2366          * following division.
 2367          */
 2368         cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
 2369         cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
 2370 
 2371         if (sc->MFA_enabled)
 2372                 mfi_tbolt_send_frame(sc, cm);
 2373         else
 2374                 mfi_send_frame(sc, cm);
 2375 
 2376         return;
 2377 }
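
/*
 * Editorial sketch (not part of the driver): the extra-frame count
 * computed at the end of mfi_data_cb() above.  The command always
 * occupies at least one frame, so only what the scatter/gather entries
 * spill past the first frame is counted.  The 64-byte frame size is an
 * assumption standing in for MFI_FRAME_SIZE; example_* names are
 * hypothetical.
 */
#include <stdint.h>

#define EXAMPLE_FRAME_SIZE      64u

static uint32_t
example_extra_frames(uint32_t base_frame_size, uint32_t sge_size,
    uint32_t nsegs)
{
        uint32_t total = base_frame_size + sge_size * nsegs;

        return ((total - 1) / EXAMPLE_FRAME_SIZE);
}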
 2378 
 2379 static int
 2380 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
 2381 {
 2382         struct mfi_frame_header *hdr;
 2383         int tm = MFI_POLL_TIMEOUT_SECS * 1000;
 2384 
 2385         hdr = &cm->cm_frame->header;
 2386 
 2387         if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
 2388                 cm->cm_timestamp = time_uptime;
 2389                 mfi_enqueue_busy(cm);
 2390         } else {
 2391                 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
 2392                 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 2393         }
 2394 
 2395         /*
 2396          * The bus address of the command is aligned on a 64 byte boundary,
 2397          * leaving the least 6 bits as zero.  For whatever reason, the
 2398          * hardware wants the address shifted right by three, leaving just
 2399          * 3 zero bits.  These three bits are then used as a prefetching
 2400          * hint for the hardware to predict how many frames need to be
 2401          * fetched across the bus.  If a command has more than 8 frames
 2402          * then the 3 bits are set to 0x7 and the firmware uses other
 2403          * information in the command to determine the total amount to fetch.
 2404          * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
 2405          * is enough for both 32bit and 64bit systems.
 2406          */
 2407         if (cm->cm_extra_frames > 7)
 2408                 cm->cm_extra_frames = 7;
 2409 
 2410         sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
 2411 
 2412         if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
 2413                 return (0);
 2414 
 2415         /* This is a polled command, so busy-wait for it to complete. */
 2416         while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 2417                 DELAY(1000);
 2418                 tm -= 1;
 2419                 if (tm <= 0)
 2420                         break;
 2421         }
 2422 
 2423         if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
 2424                 device_printf(sc->mfi_dev, "Frame %p timed out "
 2425                     "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
 2426                 return (ETIMEDOUT);
 2427         }
 2428 
 2429         return (0);
 2430 }
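
/*
 * Editorial sketch (not part of the driver): the encoding described in
 * the comment above.  The 64-byte-aligned frame address is shifted
 * right by three, and the freed low bits carry the prefetch hint (the
 * extra-frame count, capped at 7).  The exact register format is
 * controller specific and handled by sc->mfi_issue_cmd; example_*
 * names are hypothetical.
 */
#include <stdint.h>

static uint64_t
example_encode_frame_post(uint64_t frame_busaddr, uint32_t extra_frames)
{
        if (extra_frames > 7)
                extra_frames = 7;       /* only 3 bits of hint available */
        return ((frame_busaddr >> 3) | extra_frames);
}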
 2431 
 2432 
 2433 void
 2434 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
 2435 {
 2436         int dir;
 2437 
 2438         if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
 2439                 dir = 0;
 2440                 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
 2441                     (cm->cm_frame->header.cmd == MFI_CMD_STP))
 2442                         dir |= BUS_DMASYNC_POSTREAD;
 2443                 if (cm->cm_flags & MFI_CMD_DATAOUT)
 2444                         dir |= BUS_DMASYNC_POSTWRITE;
 2445 
 2446                 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
 2447                 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 2448                 cm->cm_flags &= ~MFI_CMD_MAPPED;
 2449         }
 2450 
 2451         cm->cm_flags |= MFI_CMD_COMPLETED;
 2452 
 2453         if (cm->cm_complete != NULL)
 2454                 cm->cm_complete(cm);
 2455         else
 2456                 wakeup(cm);
 2457 }
 2458 
 2459 static int
 2460 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
 2461 {
 2462         struct mfi_command *cm;
 2463         struct mfi_abort_frame *abort;
 2464         int i = 0;
 2465         uint32_t context = 0;
 2466 
 2467         mtx_lock(&sc->mfi_io_lock);
 2468         if ((cm = mfi_dequeue_free(sc)) == NULL) {
                      mtx_unlock(&sc->mfi_io_lock);
 2469                 return (EBUSY);
 2470         }
 2471 
 2472         /* Zero out the MFI frame */
 2473         context = cm->cm_frame->header.context;
 2474         bzero(cm->cm_frame, sizeof(union mfi_frame));
 2475         cm->cm_frame->header.context = context;
 2476 
 2477         abort = &cm->cm_frame->abort;
 2478         abort->header.cmd = MFI_CMD_ABORT;
 2479         abort->header.flags = 0;
 2480         abort->header.scsi_status = 0;
 2481         abort->abort_context = (*cm_abort)->cm_frame->header.context;
 2482         abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
 2483         abort->abort_mfi_addr_hi =
 2484                 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
 2485         cm->cm_data = NULL;
 2486         cm->cm_flags = MFI_CMD_POLLED;
 2487 
 2488         mfi_mapcmd(sc, cm);
 2489         mfi_release_command(cm);
 2490 
 2491         mtx_unlock(&sc->mfi_io_lock);
 2492         while (i < 5 && *cm_abort != NULL) {
 2493                 tsleep(cm_abort, 0, "mfiabort",
 2494                     5 * hz);
 2495                 i++;
 2496         }
 2497         if (*cm_abort != NULL) {
 2498                 /* Force a complete if command didn't abort */
 2499                 mtx_lock(&sc->mfi_io_lock);
 2500                 (*cm_abort)->cm_complete(*cm_abort);
 2501                 mtx_unlock(&sc->mfi_io_lock);
 2502         }
 2503 
 2504         return (0);
 2505 }
 2506 
 2507 int
 2508 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
 2509      int len)
 2510 {
 2511         struct mfi_command *cm;
 2512         struct mfi_io_frame *io;
 2513         int error;
 2514         uint32_t context = 0;
 2515 
 2516         if ((cm = mfi_dequeue_free(sc)) == NULL)
 2517                 return (EBUSY);
 2518 
 2519         /* Zero out the MFI frame */
 2520         context = cm->cm_frame->header.context;
 2521         bzero(cm->cm_frame, sizeof(union mfi_frame));
 2522         cm->cm_frame->header.context = context;
 2523 
 2524         io = &cm->cm_frame->io;
 2525         io->header.cmd = MFI_CMD_LD_WRITE;
 2526         io->header.target_id = id;
 2527         io->header.timeout = 0;
 2528         io->header.flags = 0;
 2529         io->header.scsi_status = 0;
 2530         io->header.sense_len = MFI_SENSE_LEN;
 2531         io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
 2532         io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
 2533         io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 2534         io->lba_hi = (lba & 0xffffffff00000000) >> 32;
 2535         io->lba_lo = lba & 0xffffffff;
 2536         cm->cm_data = virt;
 2537         cm->cm_len = len;
 2538         cm->cm_sg = &io->sgl;
 2539         cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
 2540         cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
 2541 
 2542         error = mfi_mapcmd(sc, cm);
 2543         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 2544             BUS_DMASYNC_POSTWRITE);
 2545         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 2546         mfi_release_command(cm);
 2547 
 2548         return (error);
 2549 }
 2550 
 2551 int
 2552 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
 2553     int len)
 2554 {
 2555         struct mfi_command *cm;
 2556         struct mfi_pass_frame *pass;
 2557         int error, readop, cdb_len;
 2558         uint32_t blkcount;
 2559 
 2560         if ((cm = mfi_dequeue_free(sc)) == NULL)
 2561                 return (EBUSY);
 2562 
 2563         pass = &cm->cm_frame->pass;
 2564         bzero(pass->cdb, 16);
 2565         pass->header.cmd = MFI_CMD_PD_SCSI_IO;
 2566 
 2567         readop = 0;
 2568         blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
 2569         cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
 2570         pass->header.target_id = id;
 2571         pass->header.timeout = 0;
 2572         pass->header.flags = 0;
 2573         pass->header.scsi_status = 0;
 2574         pass->header.sense_len = MFI_SENSE_LEN;
 2575         pass->header.data_len = len;
 2576         pass->header.cdb_len = cdb_len;
 2577         pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
 2578         pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 2579         cm->cm_data = virt;
 2580         cm->cm_len = len;
 2581         cm->cm_sg = &pass->sgl;
 2582         cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
 2583         cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
 2584 
 2585         error = mfi_mapcmd(sc, cm);
 2586         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 2587             BUS_DMASYNC_POSTWRITE);
 2588         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 2589         mfi_release_command(cm);
 2590 
 2591         return (error);
 2592 }
 2593 
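      /*
       * Character-device open: refuse new opens while the driver is
       * detaching.
       */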
 2594 static int
 2595 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
 2596 {
 2597         struct mfi_softc *sc;
 2598         int error;
 2599 
 2600         sc = dev->si_drv1;
 2601 
 2602         mtx_lock(&sc->mfi_io_lock);
 2603         if (sc->mfi_detaching)
 2604                 error = ENXIO;
 2605         else {
 2606                 sc->mfi_flags |= MFI_FLAGS_OPEN;
 2607                 error = 0;
 2608         }
 2609         mtx_unlock(&sc->mfi_io_lock);
 2610 
 2611         return (error);
 2612 }
 2613 
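      /*
       * Character-device close: clear the open flag and drop any AEN
       * registrations made by the closing process.
       */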
 2614 static int
 2615 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 2616 {
 2617         struct mfi_softc *sc;
 2618         struct mfi_aen *mfi_aen_entry, *tmp;
 2619 
 2620         sc = dev->si_drv1;
 2621 
 2622         mtx_lock(&sc->mfi_io_lock);
 2623         sc->mfi_flags &= ~MFI_FLAGS_OPEN;
 2624 
 2625         TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
 2626                 if (mfi_aen_entry->p == curproc) {
 2627                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 2628                             aen_link);
 2629                         free(mfi_aen_entry, M_MFIBUF);
 2630                 }
 2631         }
 2632         mtx_unlock(&sc->mfi_io_lock);
 2633         return (0);
 2634 }
 2635 
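      /*
       * Serialize configuration-changing DCMDs with an exclusive sx lock;
       * returns non-zero when the lock was taken so that mfi_config_unlock()
       * knows whether to release it.
       */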
 2636 static int
 2637 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
 2638 {
 2639 
 2640         switch (opcode) {
 2641         case MFI_DCMD_LD_DELETE:
 2642         case MFI_DCMD_CFG_ADD:
 2643         case MFI_DCMD_CFG_CLEAR:
 2644                 sx_xlock(&sc->mfi_config_lock);
 2645                 return (1);
 2646         default:
 2647                 return (0);
 2648         }
 2649 }
 2650 
 2651 static void
 2652 mfi_config_unlock(struct mfi_softc *sc, int locked)
 2653 {
 2654 
 2655         if (locked)
 2656                 sx_xunlock(&sc->mfi_config_lock);
 2657 }
 2658 
 2659 /*
 2660  * Perform pre-issue checks on commands from userland and possibly veto
 2661  * them.
 2662  */
 2663 static int
 2664 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
 2665 {
 2666         struct mfi_disk *ld, *ld2;
 2667         int error;
 2668         struct mfi_system_pd *syspd = NULL;
 2669         uint16_t syspd_id;
 2670         uint16_t *mbox;
 2671 
 2672         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 2673         error = 0;
 2674         switch (cm->cm_frame->dcmd.opcode) {
 2675         case MFI_DCMD_LD_DELETE:
 2676                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 2677                         if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
 2678                                 break;
 2679                 }
 2680                 if (ld == NULL)
 2681                         error = ENOENT;
 2682                 else
 2683                         error = mfi_disk_disable(ld);
 2684                 break;
 2685         case MFI_DCMD_CFG_CLEAR:
 2686                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 2687                         error = mfi_disk_disable(ld);
 2688                         if (error)
 2689                                 break;
 2690                 }
 2691                 if (error) {
 2692                         TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
 2693                                 if (ld2 == ld)
 2694                                         break;
 2695                                 mfi_disk_enable(ld2);
 2696                         }
 2697                 }
 2698                 break;
 2699         case MFI_DCMD_PD_STATE_SET:
 2700                 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
 2701                 syspd_id = mbox[0];
 2702                 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
 2703                         TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
 2704                                 if (syspd->pd_id == syspd_id)
 2705                                         break;
 2706                         }
 2707                 } else {
 2708                         break;
 2709                 }
 2710                 if (syspd)
 2711                         error = mfi_syspd_disable(syspd);
 2712                 break;
 2713         default:
 2714                 break;
 2715         }
 2716         return (error);
 2717 }
 2718 
 2719 /* Perform post-issue checks on commands from userland. */
 2720 static void
 2721 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
 2722 {
 2723         struct mfi_disk *ld, *ldn;
 2724         struct mfi_system_pd *syspd = NULL;
 2725         uint16_t syspd_id;
 2726         uint16_t *mbox;
 2727 
 2728         switch (cm->cm_frame->dcmd.opcode) {
 2729         case MFI_DCMD_LD_DELETE:
 2730                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 2731                         if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
 2732                                 break;
 2733                 }
 2734                 KASSERT(ld != NULL, ("volume disappeared"));
 2735                 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
 2736                         mtx_unlock(&sc->mfi_io_lock);
 2737                         mtx_lock(&Giant);
 2738                         device_delete_child(sc->mfi_dev, ld->ld_dev);
 2739                         mtx_unlock(&Giant);
 2740                         mtx_lock(&sc->mfi_io_lock);
 2741                 } else
 2742                         mfi_disk_enable(ld);
 2743                 break;
 2744         case MFI_DCMD_CFG_CLEAR:
 2745                 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
 2746                         mtx_unlock(&sc->mfi_io_lock);
 2747                         mtx_lock(&Giant);
 2748                         TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
 2749                                 device_delete_child(sc->mfi_dev, ld->ld_dev);
 2750                         }
 2751                         mtx_unlock(&Giant);
 2752                         mtx_lock(&sc->mfi_io_lock);
 2753                 } else {
 2754                         TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
 2755                                 mfi_disk_enable(ld);
 2756                 }
 2757                 break;
 2758         case MFI_DCMD_CFG_ADD:
 2759                 mfi_ldprobe(sc);
 2760                 break;
 2761         case MFI_DCMD_CFG_FOREIGN_IMPORT:
 2762                 mfi_ldprobe(sc);
 2763                 break;
 2764         case MFI_DCMD_PD_STATE_SET:
 2765                 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
 2766                 syspd_id = mbox[0];
 2767                 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
 2768                         TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
 2769                                 if (syspd->pd_id == syspd_id)
 2770                                         break;
 2771                         }
 2772                 } else {
 2773                         break;
 2774                 }
 2775                 /* If the transition fails then enable the syspd again */
 2776                 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
 2777                         mfi_syspd_enable(syspd);
 2778                 break;
 2779         }
 2780 }
 2781 
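      /*
       * Determine whether a userland CFG_ADD or LD_DELETE request targets an
       * SSC drive (isSSCD); the caller uses the result to skip the usual
       * pre/post command checks.
       */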
 2782 static int
 2783 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
 2784 {
 2785         struct mfi_config_data *conf_data;
 2786         struct mfi_command *ld_cm = NULL;
 2787         struct mfi_ld_info *ld_info = NULL;
 2788         struct mfi_ld_config *ld;
 2789         char *p;
 2790         int error = 0;
 2791 
 2792         conf_data = (struct mfi_config_data *)cm->cm_data;
 2793 
 2794         if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
 2795                 p = (char *)conf_data->array;
 2796                 p += conf_data->array_size * conf_data->array_count;
 2797                 ld = (struct mfi_ld_config *)p;
 2798                 if (ld->params.isSSCD == 1)
 2799                         error = 1;
 2800         } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
 2801                 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
 2802                     (void **)&ld_info, sizeof(*ld_info));
 2803                 if (error) {
 2804                         device_printf(sc->mfi_dev, "Failed to allocate "
 2805                             "MFI_DCMD_LD_GET_INFO %d\n", error);
 2806                         if (ld_info)
 2807                                 free(ld_info, M_MFIBUF);
 2808                         return 0;
 2809                 }
 2810                 ld_cm->cm_flags = MFI_CMD_DATAIN;
 2811                 ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
 2812                 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
 2813                 if (mfi_wait_command(sc, ld_cm) != 0) {
 2814                         device_printf(sc->mfi_dev, "failed to get log drv\n");
 2815                         mfi_release_command(ld_cm);
 2816                         free(ld_info, M_MFIBUF);
 2817                         return 0;
 2818                 }
 2819 
 2820                 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
 2821                         free(ld_info, M_MFIBUF);
 2822                         mfi_release_command(ld_cm);
 2823                         return 0;
 2824                 } else {
 2825                         ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
 2826                 }
 2827 
 2828                 if (ld_info->ld_config.params.isSSCD == 1)
 2829                         error = 1;
 2830 
 2831                 mfi_release_command(ld_cm);
 2832                 free(ld_info, M_MFIBUF);
 2833 
 2834         }
 2835         return error;
 2836 }
 2837 
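      /*
       * Prepare an MFI_CMD_STP request from userland: allocate a DMA-able
       * kernel bounce buffer for each user S/G element, copy the user data
       * in, and point the frame's scatter/gather list at the bounce buffers.
       */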
 2838 static int
 2839 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
 2840 {
 2841         uint8_t i;
 2842         struct mfi_ioc_packet *ioc;
 2843         int sge_size, error;
 2844         struct megasas_sge *kern_sge;
 2845         ioc = (struct mfi_ioc_packet *)arg;
 2846 
 2847         memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
 2848         kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
 2849         cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
 2850 
 2851         if (sizeof(bus_addr_t) == 8) {
 2852                 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
 2853                 cm->cm_extra_frames = 2;
 2854                 sge_size = sizeof(struct mfi_sg64);
 2855         } else {
 2856                 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
 2857                 sge_size = sizeof(struct mfi_sg32);
 2858         }
 2859 
 2860         cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
 2861         for (i = 0; i < ioc->mfi_sge_count; i++) {
 2862                 if (bus_dma_tag_create(sc->mfi_parent_dmat,     /* parent */
 2863                         1, 0,                   /* algnmnt, boundary */
 2864                         BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
 2865                         BUS_SPACE_MAXADDR,      /* highaddr */
 2866                         NULL, NULL,             /* filter, filterarg */
 2867                         ioc->mfi_sgl[i].iov_len,/* maxsize */
 2868                         2,                      /* nsegments */
 2869                         ioc->mfi_sgl[i].iov_len,/* maxsegsize */
 2870                         BUS_DMA_ALLOCNOW,       /* flags */
 2871                         NULL, NULL,             /* lockfunc, lockarg */
 2872                         &sc->mfi_kbuff_arr_dmat[i])) {
 2873                         device_printf(sc->mfi_dev,
 2874                             "Cannot allocate mfi_kbuff_arr_dmat tag\n");
 2875                         return (ENOMEM);
 2876                 }
 2877 
 2878                 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
 2879                     (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
 2880                     &sc->mfi_kbuff_arr_dmamap[i])) {
 2881                         device_printf(sc->mfi_dev,
 2882                             "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
 2883                         return (ENOMEM);
 2884                 }
 2885 
 2886                 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
 2887                     sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
 2888                     ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
 2889                     &sc->mfi_kbuff_arr_busaddr[i], 0);
 2890 
 2891                 if (!sc->kbuff_arr[i]) {
 2892                         device_printf(sc->mfi_dev,
 2893                             "Could not allocate memory for kbuff_arr info\n");
 2894                         return -1;
 2895                 }
 2896                 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
 2897                 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
 2898 
 2899                 if (sizeof(bus_addr_t) == 8) {
 2900                         cm->cm_frame->stp.sgl.sg64[i].addr =
 2901                             kern_sge[i].phys_addr;
 2902                         cm->cm_frame->stp.sgl.sg64[i].len =
 2903                             ioc->mfi_sgl[i].iov_len;
 2904                 } else {
 2905                         cm->cm_frame->stp.sgl.sg32[i].addr =
 2906                             kern_sge[i].phys_addr;
 2907                         cm->cm_frame->stp.sgl.sg32[i].len =
 2908                             ioc->mfi_sgl[i].iov_len;
 2909                 }
 2910 
 2911                 error = copyin(ioc->mfi_sgl[i].iov_base,
 2912                     sc->kbuff_arr[i],
 2913                     ioc->mfi_sgl[i].iov_len);
 2914                 if (error != 0) {
 2915                         device_printf(sc->mfi_dev, "Copy in failed\n");
 2916                         return error;
 2917                 }
 2918         }
 2919 
 2920         cm->cm_flags |= MFI_CMD_MAPPED;
 2921         return 0;
 2922 }
 2923 
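      /*
       * Handle MFIIO_PASSTHRU: copy in the user buffer (if any), issue the
       * DCMD frame supplied by userland, and copy the results back out.
       */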
 2924 static int
 2925 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
 2926 {
 2927         struct mfi_command *cm;
 2928         struct mfi_dcmd_frame *dcmd;
 2929         void *ioc_buf = NULL;
 2930         uint32_t context;
 2931         int error = 0, locked;
 2932 
 2933 
 2934         if (ioc->buf_size > 0) {
 2935                 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
 2936                 if (ioc_buf == NULL) {
 2937                         return (ENOMEM);
 2938                 }
 2939                 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
 2940                 if (error) {
 2941                         device_printf(sc->mfi_dev, "failed to copyin\n");
 2942                         free(ioc_buf, M_MFIBUF);
 2943                         return (error);
 2944                 }
 2945         }
 2946 
 2947         locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
 2948 
 2949         mtx_lock(&sc->mfi_io_lock);
 2950         while ((cm = mfi_dequeue_free(sc)) == NULL)
 2951                 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
 2952 
 2953         /* Save context for later */
 2954         context = cm->cm_frame->header.context;
 2955 
 2956         dcmd = &cm->cm_frame->dcmd;
 2957         bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
 2958 
 2959         cm->cm_sg = &dcmd->sgl;
 2960         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 2961         cm->cm_data = ioc_buf;
 2962         cm->cm_len = ioc->buf_size;
 2963 
 2964         /* restore context */
 2965         cm->cm_frame->header.context = context;
 2966 
 2967         /* Cheat since we don't know if we're writing or reading */
 2968         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
 2969 
 2970         error = mfi_check_command_pre(sc, cm);
 2971         if (error)
 2972                 goto out;
 2973 
 2974         error = mfi_wait_command(sc, cm);
 2975         if (error) {
 2976                 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
 2977                 goto out;
 2978         }
 2979         bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
 2980         mfi_check_command_post(sc, cm);
 2981 out:
 2982         mfi_release_command(cm);
 2983         mtx_unlock(&sc->mfi_io_lock);
 2984         mfi_config_unlock(sc, locked);
 2985         if (ioc->buf_size > 0)
 2986                 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
 2987         if (ioc_buf)
 2988                 free(ioc_buf, M_MFIBUF);
 2989         return (error);
 2990 }
 2991 
 2992 #define PTRIN(p)                ((void *)(uintptr_t)(p))
 2993 
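      /*
       * Main ioctl entry point: statistics and disk queries, raw MFI command
       * pass-through (native, 32-bit compat, and Linux shims), and AEN
       * registration.
       */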
 2994 static int
 2995 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
 2996 {
 2997         struct mfi_softc *sc;
 2998         union mfi_statrequest *ms;
 2999         struct mfi_ioc_packet *ioc;
 3000 #ifdef COMPAT_FREEBSD32
 3001         struct mfi_ioc_packet32 *ioc32;
 3002 #endif
 3003         struct mfi_ioc_aen *aen;
 3004         struct mfi_command *cm = NULL;
 3005         uint32_t context = 0;
 3006         union mfi_sense_ptr sense_ptr;
 3007         uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
 3008         size_t len;
 3009         int i, res;
 3010         struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
 3011 #ifdef COMPAT_FREEBSD32
 3012         struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
 3013         struct mfi_ioc_passthru iop_swab;
 3014 #endif
 3015         int error, locked;
 3016         union mfi_sgl *sgl;
 3017         sc = dev->si_drv1;
 3018         error = 0;
 3019 
 3020         if (sc->adpreset)
 3021                 return EBUSY;
 3022 
 3023         if (sc->hw_crit_error)
 3024                 return EBUSY;
 3025 
 3026         if (sc->issuepend_done == 0)
 3027                 return EBUSY;
 3028 
 3029         switch (cmd) {
 3030         case MFIIO_STATS:
 3031                 ms = (union mfi_statrequest *)arg;
 3032                 switch (ms->ms_item) {
 3033                 case MFIQ_FREE:
 3034                 case MFIQ_BIO:
 3035                 case MFIQ_READY:
 3036                 case MFIQ_BUSY:
 3037                         bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
 3038                             sizeof(struct mfi_qstat));
 3039                         break;
 3040                 default:
 3041                         error = ENOIOCTL;
 3042                         break;
 3043                 }
 3044                 break;
 3045         case MFIIO_QUERY_DISK:
 3046         {
 3047                 struct mfi_query_disk *qd;
 3048                 struct mfi_disk *ld;
 3049 
 3050                 qd = (struct mfi_query_disk *)arg;
 3051                 mtx_lock(&sc->mfi_io_lock);
 3052                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 3053                         if (ld->ld_id == qd->array_id)
 3054                                 break;
 3055                 }
 3056                 if (ld == NULL) {
 3057                         qd->present = 0;
 3058                         mtx_unlock(&sc->mfi_io_lock);
 3059                         return (0);
 3060                 }
 3061                 qd->present = 1;
 3062                 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
 3063                         qd->open = 1;
 3064                 bzero(qd->devname, SPECNAMELEN + 1);
 3065                 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
 3066                 mtx_unlock(&sc->mfi_io_lock);
 3067                 break;
 3068         }
 3069         case MFI_CMD:
 3070 #ifdef COMPAT_FREEBSD32
 3071         case MFI_CMD32:
 3072 #endif
 3073                 {
 3074                 devclass_t devclass;
 3075                 ioc = (struct mfi_ioc_packet *)arg;
 3076                 int adapter;
 3077 
 3078                 adapter = ioc->mfi_adapter_no;
 3079                 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
 3080                         devclass = devclass_find("mfi");
 3081                         sc = devclass_get_softc(devclass, adapter);
                              if (sc == NULL)
                                      return (ENOENT);
 3082                 }
 3083                 mtx_lock(&sc->mfi_io_lock);
 3084                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 3085                         mtx_unlock(&sc->mfi_io_lock);
 3086                         return (EBUSY);
 3087                 }
 3088                 mtx_unlock(&sc->mfi_io_lock);
 3089                 locked = 0;
 3090 
 3091                 /*
 3092                  * save off original context since copying from user
 3093                  * will clobber some data
 3094                  */
 3095                 context = cm->cm_frame->header.context;
 3096                 cm->cm_frame->header.context = cm->cm_index;
 3097 
 3098                 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
 3099                     2 * MEGAMFI_FRAME_SIZE);
 3100                 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
 3101                     * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
 3102                 cm->cm_frame->header.scsi_status = 0;
 3103                 cm->cm_frame->header.pad0 = 0;
 3104                 if (ioc->mfi_sge_count) {
 3105                         cm->cm_sg =
 3106                             (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
 3107                 }
 3108                 sgl = cm->cm_sg;
 3109                 cm->cm_flags = 0;
 3110                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
 3111                         cm->cm_flags |= MFI_CMD_DATAIN;
 3112                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
 3113                         cm->cm_flags |= MFI_CMD_DATAOUT;
 3114                 /* Legacy app shim */
 3115                 if (cm->cm_flags == 0)
 3116                         cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
 3117                 cm->cm_len = cm->cm_frame->header.data_len;
 3118                 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
 3119 #ifdef COMPAT_FREEBSD32
 3120                         if (cmd == MFI_CMD) {
 3121 #endif
 3122                                 /* Native */
 3123                                 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
 3124 #ifdef COMPAT_FREEBSD32
 3125                         } else {
 3126                                 /* 32bit on 64bit */
 3127                                 ioc32 = (struct mfi_ioc_packet32 *)ioc;
 3128                                 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
 3129                         }
 3130 #endif
 3131                         cm->cm_len += cm->cm_stp_len;
 3132                 }
 3133                 if (cm->cm_len &&
 3134                     (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
 3135                         cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
 3136                             M_WAITOK | M_ZERO);
 3137                         if (cm->cm_data == NULL) {
 3138                                 device_printf(sc->mfi_dev, "Malloc failed\n");
 3139                                 goto out;
 3140                         }
 3141                 } else {
 3142                         cm->cm_data = NULL;
 3143                 }
 3144 
 3145                 /* restore header context */
 3146                 cm->cm_frame->header.context = context;
 3147 
 3148                 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
 3149                         res = mfi_stp_cmd(sc, cm, arg);
 3150                         if (res != 0)
 3151                                 goto out;
 3152                 } else {
 3153                         temp = data;
 3154                         if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
 3155                             (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
 3156                                 for (i = 0; i < ioc->mfi_sge_count; i++) {
 3157 #ifdef COMPAT_FREEBSD32
 3158                                         if (cmd == MFI_CMD) {
 3159 #endif
 3160                                                 /* Native */
 3161                                                 addr = ioc->mfi_sgl[i].iov_base;
 3162                                                 len = ioc->mfi_sgl[i].iov_len;
 3163 #ifdef COMPAT_FREEBSD32
 3164                                         } else {
 3165                                                 /* 32bit on 64bit */
 3166                                                 ioc32 = (struct mfi_ioc_packet32 *)ioc;
 3167                                                 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
 3168                                                 len = ioc32->mfi_sgl[i].iov_len;
 3169                                         }
 3170 #endif
 3171                                         error = copyin(addr, temp, len);
 3172                                         if (error != 0) {
 3173                                                 device_printf(sc->mfi_dev,
 3174                                                     "Copy in failed\n");
 3175                                                 goto out;
 3176                                         }
 3177                                         temp = &temp[len];
 3178                                 }
 3179                         }
 3180                 }
 3181 
 3182                 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
 3183                         locked = mfi_config_lock(sc,
 3184                              cm->cm_frame->dcmd.opcode);
 3185 
 3186                 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
 3187                         cm->cm_frame->pass.sense_addr_lo =
 3188                             (uint32_t)cm->cm_sense_busaddr;
 3189                         cm->cm_frame->pass.sense_addr_hi =
 3190                             (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 3191                 }
 3192                 mtx_lock(&sc->mfi_io_lock);
 3193                 skip_pre_post = mfi_check_for_sscd(sc, cm);
 3194                 if (!skip_pre_post) {
 3195                         error = mfi_check_command_pre(sc, cm);
 3196                         if (error) {
 3197                                 mtx_unlock(&sc->mfi_io_lock);
 3198                                 goto out;
 3199                         }
 3200                 }
 3201                 if ((error = mfi_wait_command(sc, cm)) != 0) {
 3202                         device_printf(sc->mfi_dev,
 3203                             "Controller command failed\n");
 3204                         mtx_unlock(&sc->mfi_io_lock);
 3205                         goto out;
 3206                 }
 3207                 if (!skip_pre_post) {
 3208                         mfi_check_command_post(sc, cm);
 3209                 }
 3210                 mtx_unlock(&sc->mfi_io_lock);
 3211 
 3212                 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
 3213                         temp = data;
 3214                         if ((cm->cm_flags & MFI_CMD_DATAIN) ||
 3215                             (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
 3216                                 for (i = 0; i < ioc->mfi_sge_count; i++) {
 3217 #ifdef COMPAT_FREEBSD32
 3218                                         if (cmd == MFI_CMD) {
 3219 #endif
 3220                                                 /* Native */
 3221                                                 addr = ioc->mfi_sgl[i].iov_base;
 3222                                                 len = ioc->mfi_sgl[i].iov_len;
 3223 #ifdef COMPAT_FREEBSD32
 3224                                         } else {
 3225                                                 /* 32bit on 64bit */
 3226                                                 ioc32 = (struct mfi_ioc_packet32 *)ioc;
 3227                                                 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
 3228                                                 len = ioc32->mfi_sgl[i].iov_len;
 3229                                         }
 3230 #endif
 3231                                         error = copyout(temp, addr, len);
 3232                                         if (error != 0) {
 3233                                                 device_printf(sc->mfi_dev,
 3234                                                     "Copy out failed\n");
 3235                                                 goto out;
 3236                                         }
 3237                                         temp = &temp[len];
 3238                                 }
 3239                         }
 3240                 }
 3241 
 3242                 if (ioc->mfi_sense_len) {
 3243                         /* get user-space sense ptr then copy out sense */
 3244                         bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
 3245                             &sense_ptr.sense_ptr_data[0],
 3246                             sizeof(sense_ptr.sense_ptr_data));
 3247 #ifdef COMPAT_FREEBSD32
 3248                         if (cmd != MFI_CMD) {
 3249                                 /*
 3250                                  * not 64bit native so zero out any address
 3251                                  * over 32bit */
 3252                                 sense_ptr.addr.high = 0;
 3253                         }
 3254 #endif
 3255                         error = copyout(cm->cm_sense, sense_ptr.user_space,
 3256                             ioc->mfi_sense_len);
 3257                         if (error != 0) {
 3258                                 device_printf(sc->mfi_dev,
 3259                                     "Copy out failed\n");
 3260                                 goto out;
 3261                         }
 3262                 }
 3263 
 3264                 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
 3265 out:
 3266                 mfi_config_unlock(sc, locked);
 3267                 if (data)
 3268                         free(data, M_MFIBUF);
 3269                 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
 3270                         for (i = 0; i < 2; i++) {
 3271                                 if (sc->kbuff_arr[i]) {
 3272                                         if (sc->mfi_kbuff_arr_busaddr[i] != 0)
 3273                                                 bus_dmamap_unload(
 3274                                                     sc->mfi_kbuff_arr_dmat[i],
 3275                                                     sc->mfi_kbuff_arr_dmamap[i]
 3276                                                     );
 3277                                         if (sc->kbuff_arr[i] != NULL)
 3278                                                 bus_dmamem_free(
 3279                                                     sc->mfi_kbuff_arr_dmat[i],
 3280                                                     sc->kbuff_arr[i],
 3281                                                     sc->mfi_kbuff_arr_dmamap[i]
 3282                                                     );
 3283                                         if (sc->mfi_kbuff_arr_dmat[i] != NULL)
 3284                                                 bus_dma_tag_destroy(
 3285                                                     sc->mfi_kbuff_arr_dmat[i]);
 3286                                 }
 3287                         }
 3288                 }
 3289                 if (cm) {
 3290                         mtx_lock(&sc->mfi_io_lock);
 3291                         mfi_release_command(cm);
 3292                         mtx_unlock(&sc->mfi_io_lock);
 3293                 }
 3294 
 3295                 break;
 3296                 }
 3297         case MFI_SET_AEN:
 3298                 aen = (struct mfi_ioc_aen *)arg;
 3299                 error = mfi_aen_register(sc, aen->aen_seq_num,
 3300                     aen->aen_class_locale);
 3301 
 3302                 break;
 3303         case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
 3304                 {
 3305                         devclass_t devclass;
 3306                         struct mfi_linux_ioc_packet l_ioc;
 3307                         int adapter;
 3308 
 3309                         devclass = devclass_find("mfi");
 3310                         if (devclass == NULL)
 3311                                 return (ENOENT);
 3312 
 3313                         error = copyin(arg, &l_ioc, sizeof(l_ioc));
 3314                         if (error)
 3315                                 return (error);
 3316                         adapter = l_ioc.lioc_adapter_no;
 3317                         sc = devclass_get_softc(devclass, adapter);
 3318                         if (sc == NULL)
 3319                                 return (ENOENT);
 3320                         return (mfi_linux_ioctl_int(sc->mfi_cdev,
 3321                             cmd, arg, flag, td));
 3322                         break;
 3323                 }
 3324         case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
 3325                 {
 3326                         devclass_t devclass;
 3327                         struct mfi_linux_ioc_aen l_aen;
 3328                         int adapter;
 3329 
 3330                         devclass = devclass_find("mfi");
 3331                         if (devclass == NULL)
 3332                                 return (ENOENT);
 3333 
 3334                         error = copyin(arg, &l_aen, sizeof(l_aen));
 3335                         if (error)
 3336                                 return (error);
 3337                         adapter = l_aen.laen_adapter_no;
 3338                         sc = devclass_get_softc(devclass, adapter);
 3339                         if (sc == NULL)
 3340                                 return (ENOENT);
 3341                         return (mfi_linux_ioctl_int(sc->mfi_cdev,
 3342                             cmd, arg, flag, td));
 3343                         break;
 3344                 }
 3345 #ifdef COMPAT_FREEBSD32
 3346         case MFIIO_PASSTHRU32:
 3347                 iop_swab.ioc_frame      = iop32->ioc_frame;
 3348                 iop_swab.buf_size       = iop32->buf_size;
 3349                 iop_swab.buf            = PTRIN(iop32->buf);
 3350                 iop                     = &iop_swab;
 3351                 /* FALLTHROUGH */
 3352 #endif
 3353         case MFIIO_PASSTHRU:
 3354                 error = mfi_user_command(sc, iop);
 3355 #ifdef COMPAT_FREEBSD32
 3356                 if (cmd == MFIIO_PASSTHRU32)
 3357                         iop32->ioc_frame = iop_swab.ioc_frame;
 3358 #endif
 3359                 break;
 3360         default:
 3361                 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
 3362                 error = ENOENT;
 3363                 break;
 3364         }
 3365 
 3366         return (error);
 3367 }
 3368 
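      /*
       * Back end for the Linux ioctl shims: same flow as the native MFI_CMD
       * path, but using the 32-bit Linux layouts (struct
       * mfi_linux_ioc_packet / mfi_linux_ioc_aen).
       */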
 3369 static int
 3370 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
 3371 {
 3372         struct mfi_softc *sc;
 3373         struct mfi_linux_ioc_packet l_ioc;
 3374         struct mfi_linux_ioc_aen l_aen;
 3375         struct mfi_command *cm = NULL;
 3376         struct mfi_aen *mfi_aen_entry;
 3377         union mfi_sense_ptr sense_ptr;
 3378         uint32_t context = 0;
 3379         uint8_t *data = NULL, *temp;
 3380         int i;
 3381         int error, locked;
 3382 
 3383         sc = dev->si_drv1;
 3384         error = 0;
 3385         switch (cmd) {
 3386         case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
 3387                 error = copyin(arg, &l_ioc, sizeof(l_ioc));
 3388                 if (error != 0)
 3389                         return (error);
 3390 
 3391                 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
 3392                         return (EINVAL);
 3393                 }
 3394 
 3395                 mtx_lock(&sc->mfi_io_lock);
 3396                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 3397                         mtx_unlock(&sc->mfi_io_lock);
 3398                         return (EBUSY);
 3399                 }
 3400                 mtx_unlock(&sc->mfi_io_lock);
 3401                 locked = 0;
 3402 
 3403                 /*
 3404                  * save off original context since copying from user
 3405                  * will clobber some data
 3406                  */
 3407                 context = cm->cm_frame->header.context;
 3408 
 3409                 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
 3410                       2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
 3411                 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
 3412                       * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
 3413                 cm->cm_frame->header.scsi_status = 0;
 3414                 cm->cm_frame->header.pad0 = 0;
 3415                 if (l_ioc.lioc_sge_count)
 3416                         cm->cm_sg =
 3417                             (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
 3418                 cm->cm_flags = 0;
 3419                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
 3420                         cm->cm_flags |= MFI_CMD_DATAIN;
 3421                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
 3422                         cm->cm_flags |= MFI_CMD_DATAOUT;
 3423                 cm->cm_len = cm->cm_frame->header.data_len;
 3424                 if (cm->cm_len &&
 3425                       (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
 3426                         cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
 3427                             M_WAITOK | M_ZERO);
 3428                         if (cm->cm_data == NULL) {
 3429                                 device_printf(sc->mfi_dev, "Malloc failed\n");
 3430                                 goto out;
 3431                         }
 3432                 } else {
 3433                         cm->cm_data = NULL;
 3434                 }
 3435 
 3436                 /* restore header context */
 3437                 cm->cm_frame->header.context = context;
 3438 
 3439                 temp = data;
 3440                 if (cm->cm_flags & MFI_CMD_DATAOUT) {
 3441                         for (i = 0; i < l_ioc.lioc_sge_count; i++) {
 3442                                 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
 3443                                        temp,
 3444                                        l_ioc.lioc_sgl[i].iov_len);
 3445                                 if (error != 0) {
 3446                                         device_printf(sc->mfi_dev,
 3447                                             "Copy in failed\n");
 3448                                         goto out;
 3449                                 }
 3450                                 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
 3451                         }
 3452                 }
 3453 
 3454                 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
 3455                         locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
 3456 
 3457                 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
 3458                         cm->cm_frame->pass.sense_addr_lo =
 3459                             (uint32_t)cm->cm_sense_busaddr;
 3460                         cm->cm_frame->pass.sense_addr_hi =
 3461                             (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
 3462                 }
 3463 
 3464                 mtx_lock(&sc->mfi_io_lock);
 3465                 error = mfi_check_command_pre(sc, cm);
 3466                 if (error) {
 3467                         mtx_unlock(&sc->mfi_io_lock);
 3468                         goto out;
 3469                 }
 3470 
 3471                 if ((error = mfi_wait_command(sc, cm)) != 0) {
 3472                         device_printf(sc->mfi_dev,
 3473                             "Controller command failed\n");
 3474                         mtx_unlock(&sc->mfi_io_lock);
 3475                         goto out;
 3476                 }
 3477 
 3478                 mfi_check_command_post(sc, cm);
 3479                 mtx_unlock(&sc->mfi_io_lock);
 3480 
 3481                 temp = data;
 3482                 if (cm->cm_flags & MFI_CMD_DATAIN) {
 3483                         for (i = 0; i < l_ioc.lioc_sge_count; i++) {
 3484                                 error = copyout(temp,
 3485                                         PTRIN(l_ioc.lioc_sgl[i].iov_base),
 3486                                         l_ioc.lioc_sgl[i].iov_len);
 3487                                 if (error != 0) {
 3488                                         device_printf(sc->mfi_dev,
 3489                                             "Copy out failed\n");
 3490                                         goto out;
 3491                                 }
 3492                                 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
 3493                         }
 3494                 }
 3495 
 3496                 if (l_ioc.lioc_sense_len) {
 3497                         /* get user-space sense ptr then copy out sense */
 3498                         bcopy(&((struct mfi_linux_ioc_packet*)arg)
 3499                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
 3500                             &sense_ptr.sense_ptr_data[0],
 3501                             sizeof(sense_ptr.sense_ptr_data));
 3502 #ifdef __amd64__
 3503                         /*
 3504                          * only 32bit Linux support so zero out any
 3505                          * address over 32bit
 3506                          */
 3507                         sense_ptr.addr.high = 0;
 3508 #endif
 3509                         error = copyout(cm->cm_sense, sense_ptr.user_space,
 3510                             l_ioc.lioc_sense_len);
 3511                         if (error != 0) {
 3512                                 device_printf(sc->mfi_dev,
 3513                                     "Copy out failed\n");
 3514                                 goto out;
 3515                         }
 3516                 }
 3517 
 3518                 error = copyout(&cm->cm_frame->header.cmd_status,
 3519                         &((struct mfi_linux_ioc_packet*)arg)
 3520                         ->lioc_frame.hdr.cmd_status,
 3521                         1);
 3522                 if (error != 0) {
 3523                         device_printf(sc->mfi_dev,
 3524                                       "Copy out failed\n");
 3525                         goto out;
 3526                 }
 3527 
 3528 out:
 3529                 mfi_config_unlock(sc, locked);
 3530                 if (data)
 3531                         free(data, M_MFIBUF);
 3532                 if (cm) {
 3533                         mtx_lock(&sc->mfi_io_lock);
 3534                         mfi_release_command(cm);
 3535                         mtx_unlock(&sc->mfi_io_lock);
 3536                 }
 3537 
 3538                 return (error);
 3539         case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
 3540                 error = copyin(arg, &l_aen, sizeof(l_aen));
 3541                 if (error != 0)
 3542                         return (error);
 3543                 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
 3544                 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
 3545                     M_WAITOK);
 3546                 mtx_lock(&sc->mfi_io_lock);
 3547                 if (mfi_aen_entry != NULL) {
 3548                         mfi_aen_entry->p = curproc;
 3549                         TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
 3550                             aen_link);
 3551                 }
 3552                 error = mfi_aen_register(sc, l_aen.laen_seq_num,
 3553                     l_aen.laen_class_locale);
 3554 
 3555                 if (error != 0) {
 3556                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 3557                             aen_link);
 3558                         free(mfi_aen_entry, M_MFIBUF);
 3559                 }
 3560                 mtx_unlock(&sc->mfi_io_lock);
 3561 
 3562                 return (error);
 3563         default:
 3564                 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
 3565                 error = ENOENT;
 3566                 break;
 3567         }
 3568 
 3569         return (error);
 3570 }
 3571 
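      /*
       * poll(2) handler: readable once an AEN has triggered; POLLERR when no
       * AEN command is outstanding.
       */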
 3572 static int
 3573 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
 3574 {
 3575         struct mfi_softc *sc;
 3576         int revents = 0;
 3577 
 3578         sc = dev->si_drv1;
 3579 
 3580         if (poll_events & (POLLIN | POLLRDNORM)) {
 3581                 if (sc->mfi_aen_triggered != 0) {
 3582                         revents |= poll_events & (POLLIN | POLLRDNORM);
 3583                         sc->mfi_aen_triggered = 0;
 3584                 }
 3585                 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
 3586                         revents |= POLLERR;
 3587                 }
 3588         }
 3589 
 3590         if (revents == 0) {
 3591                 if (poll_events & (POLLIN | POLLRDNORM)) {
 3592                         sc->mfi_poll_waiting = 1;
 3593                         selrecord(td, &sc->mfi_select);
 3594                 }
 3595         }
 3596 
 3597         return revents;
 3598 }
 3599 
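      /*
       * Debugging aid: walk every mfi(4) instance and print any busy command
       * that has been outstanding longer than MFI_CMD_TIMEOUT.
       */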
 3600 static void
 3601 mfi_dump_all(void)
 3602 {
 3603         struct mfi_softc *sc;
 3604         struct mfi_command *cm;
 3605         devclass_t dc;
 3606         time_t deadline;
 3607         int timedout;
 3608         int i;
 3609 
 3610         dc = devclass_find("mfi");
 3611         if (dc == NULL) {
 3612                 printf("No mfi dev class\n");
 3613                 return;
 3614         }
 3615 
 3616         for (i = 0; ; i++) {
 3617                 sc = devclass_get_softc(dc, i);
 3618                 if (sc == NULL)
 3619                         break;
 3620                 device_printf(sc->mfi_dev, "Dumping\n\n");
 3621                 timedout = 0;
 3622                 deadline = time_uptime - MFI_CMD_TIMEOUT;
 3623                 mtx_lock(&sc->mfi_io_lock);
 3624                 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 3625                         if (cm->cm_timestamp < deadline) {
 3626                                 device_printf(sc->mfi_dev,
 3627                                     "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
 3628                                     cm, (int)(time_uptime - cm->cm_timestamp));
 3629                                 MFI_PRINT_CMD(cm);
 3630                                 timedout++;
 3631                         }
 3632                 }
 3633 
 3634 #if 0
 3635                 if (timedout)
 3636                         MFI_DUMP_CMDS(SC);
 3637 #endif
 3638 
 3639                 mtx_unlock(&sc->mfi_io_lock);
 3640         }
 3641 
 3642         return;
 3643 }
 3644 
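      /*
       * Watchdog callout: let mfi_tbolt_reset() check for and handle a
       * controller reset, warn about commands outstanding longer than
       * MFI_CMD_TIMEOUT, and re-arm itself.
       */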
 3645 static void
 3646 mfi_timeout(void *data)
 3647 {
 3648         struct mfi_softc *sc = (struct mfi_softc *)data;
 3649         struct mfi_command *cm;
 3650         time_t deadline;
 3651         int timedout = 0;
 3652 
 3653         deadline = time_uptime - MFI_CMD_TIMEOUT;
 3654         if (sc->adpreset == 0) {
 3655                 if (!mfi_tbolt_reset(sc)) {
 3656                         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
 3657                         return;
 3658                 }
 3659         }
 3660         mtx_lock(&sc->mfi_io_lock);
 3661         TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 3662                 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
 3663                         continue;
 3664                 if (cm->cm_timestamp < deadline) {
 3665                         if (sc->adpreset != 0 && sc->issuepend_done == 0) {
 3666                                 cm->cm_timestamp = time_uptime;
 3667                         } else {
 3668                                 device_printf(sc->mfi_dev,
 3669                                     "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
 3670                                      cm, (int)(time_uptime - cm->cm_timestamp)
 3671                                      );
 3672                                 MFI_PRINT_CMD(cm);
 3673                                 MFI_VALIDATE_CMD(sc, cm);
 3674                                 timedout++;
 3675                         }
 3676                 }
 3677         }
 3678 
 3679 #if 0
 3680         if (timedout)
 3681                 MFI_DUMP_CMDS(SC);
 3682 #endif
 3683 
 3684         mtx_unlock(&sc->mfi_io_lock);
 3685 
 3686         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
 3687             mfi_timeout, sc);
 3688 
 3689         if (0)
 3690                 mfi_dump_all();
 3691         return;
 3692 }
