FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi.c

    1 /*-
    2  * Copyright (c) 2006 IronPort Systems
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 /*-
   27  * Copyright (c) 2007 LSI Corp.
   28  * Copyright (c) 2007 Rajesh Prabhakaran.
   29  * All rights reserved.
   30  *
   31  * Redistribution and use in source and binary forms, with or without
   32  * modification, are permitted provided that the following conditions
   33  * are met:
   34  * 1. Redistributions of source code must retain the above copyright
   35  *    notice, this list of conditions and the following disclaimer.
   36  * 2. Redistributions in binary form must reproduce the above copyright
   37  *    notice, this list of conditions and the following disclaimer in the
   38  *    documentation and/or other materials provided with the distribution.
   39  *
   40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   50  * SUCH DAMAGE.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD$");
   55 
   56 #include "opt_mfi.h"
   57 
   58 #include <sys/param.h>
   59 #include <sys/systm.h>
   60 #include <sys/sysctl.h>
   61 #include <sys/malloc.h>
   62 #include <sys/kernel.h>
   63 #include <sys/poll.h>
   64 #include <sys/selinfo.h>
   65 #include <sys/bus.h>
   66 #include <sys/conf.h>
   67 #include <sys/eventhandler.h>
   68 #include <sys/rman.h>
   69 #include <sys/bus_dma.h>
   70 #include <sys/bio.h>
   71 #include <sys/ioccom.h>
   72 #include <sys/uio.h>
   73 #include <sys/proc.h>
   74 #include <sys/signalvar.h>
   75 
   76 #include <machine/bus.h>
   77 #include <machine/resource.h>
   78 
   79 #include <dev/mfi/mfireg.h>
   80 #include <dev/mfi/mfi_ioctl.h>
   81 #include <dev/mfi/mfivar.h>
   82 
   83 static int      mfi_alloc_commands(struct mfi_softc *);
   84 static int      mfi_comms_init(struct mfi_softc *);
   85 static int      mfi_wait_command(struct mfi_softc *, struct mfi_command *);
   86 static int      mfi_get_controller_info(struct mfi_softc *);
   87 static int      mfi_get_log_state(struct mfi_softc *,
   88                     struct mfi_evt_log_state **);
   89 static int      mfi_parse_entries(struct mfi_softc *, int, int);
   90 static int      mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
   91                     uint32_t, void **, size_t);
   92 static void     mfi_data_cb(void *, bus_dma_segment_t *, int, int);
   93 static void     mfi_startup(void *arg);
   94 static void     mfi_intr(void *arg);
   95 static void     mfi_ldprobe(struct mfi_softc *sc);
   96 static int      mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
   97 static void     mfi_aen_complete(struct mfi_command *);
   98 static int      mfi_aen_setup(struct mfi_softc *, uint32_t);
   99 static int      mfi_add_ld(struct mfi_softc *sc, int);
  100 static void     mfi_add_ld_complete(struct mfi_command *);
  101 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
  102 static void     mfi_bio_complete(struct mfi_command *);
  103 static int      mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
  104 static int      mfi_send_frame(struct mfi_softc *, struct mfi_command *);
  105 static void     mfi_complete(struct mfi_softc *, struct mfi_command *);
  106 static int      mfi_abort(struct mfi_softc *, struct mfi_command *);
  107 static int      mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
  108 static void     mfi_timeout(void *);
  109 static int      mfi_user_command(struct mfi_softc *,
  110                     struct mfi_ioc_passthru *);
  111 static void     mfi_enable_intr_xscale(struct mfi_softc *sc);
  112 static void     mfi_enable_intr_ppc(struct mfi_softc *sc);
  113 static int32_t  mfi_read_fw_status_xscale(struct mfi_softc *sc);
  114 static int32_t  mfi_read_fw_status_ppc(struct mfi_softc *sc);
  115 static int      mfi_check_clear_intr_xscale(struct mfi_softc *sc);
  116 static int      mfi_check_clear_intr_ppc(struct mfi_softc *sc);
  117 static void     mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
  118 static void     mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
  119 
  120 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
  121 static int      mfi_event_locale = MFI_EVT_LOCALE_ALL;
  122 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
  123 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
  124             0, "event message locale");
  125 
  126 static int      mfi_event_class = MFI_EVT_CLASS_INFO;
  127 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
  128 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
  129           0, "event message class");
  130 
  131 static int      mfi_max_cmds = 128;
  132 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
  133 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
  134            0, "Max commands");
  135 
  136 /* Management interface */
  137 static d_open_t         mfi_open;
  138 static d_close_t        mfi_close;
  139 static d_ioctl_t        mfi_ioctl;
  140 static d_poll_t         mfi_poll;
  141 
  142 static struct cdevsw mfi_cdevsw = {
  143         .d_version =    D_VERSION,
  144         .d_flags =      0,
  145         .d_open =       mfi_open,
  146         .d_close =      mfi_close,
  147         .d_ioctl =      mfi_ioctl,
  148         .d_poll =       mfi_poll,
  149         .d_name =       "mfi",
  150 };
  151 
  152 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
  153 
  154 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
  155 
  156 static void
  157 mfi_enable_intr_xscale(struct mfi_softc *sc)
  158 {
  159         MFI_WRITE4(sc, MFI_OMSK, 0x01);
  160 }
  161 
  162 static void
  163 mfi_enable_intr_ppc(struct mfi_softc *sc)
  164 {
  165         MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
  166         if (sc->mfi_flags & MFI_FLAGS_1078) {
  167                 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
  168         } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
  169                 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
  170         }
  171 }
  172 
  173 static int32_t
  174 mfi_read_fw_status_xscale(struct mfi_softc *sc)
  175 {
  176         return MFI_READ4(sc, MFI_OMSG0);
  177 }
  178 
  179 static int32_t
  180 mfi_read_fw_status_ppc(struct mfi_softc *sc)
  181 {
  182         return MFI_READ4(sc, MFI_OSP0);
  183 }
  184 
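       /*
        * Check whether the controller is asserting an interrupt and, if so,
        * acknowledge it.  Both variants return 0 when a valid interrupt was
        * claimed and cleared, and 1 when the status register shows nothing
        * that belongs to this driver.
        */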
  185 static int
  186 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
  187 {
  188         int32_t status;
  189 
  190         status = MFI_READ4(sc, MFI_OSTS);
  191         if ((status & MFI_OSTS_INTR_VALID) == 0)
  192                 return 1;
  193 
  194         MFI_WRITE4(sc, MFI_OSTS, status);
  195         return 0;
  196 }
  197 
  198 static int
  199 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
  200 {
  201         int32_t status;
  202 
  203         status = MFI_READ4(sc, MFI_OSTS);
  204         if (sc->mfi_flags & MFI_FLAGS_1078) {
  205                 if (!(status & MFI_1078_RM)) {
  206                         return 1;
  207                 }
  208         } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
  209                 if (!(status & MFI_GEN2_RM)) {
  210                         return 1;
  211                 }
  212         }
  213 
  214         MFI_WRITE4(sc, MFI_ODCR0, status);
  215         return 0;
  216 }
  217 
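       /*
        * Post a command to the controller's inbound queue port.  Frames are
        * allocated from 64-byte aligned memory (see the frame DMA tag in
        * mfi_attach()), so the low address bits are free to carry the extra
        * frame count: the xscale port takes (bus_addr >> 3) with the count
        * in the low three bits, while the ppc-style port takes the address
        * with the count shifted left by one and bit 0 set.
        */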
  218 static void
  219 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
  220 {
   221         MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
  222 }
  223 
  224 static void
  225 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
  226 {
   227         MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
  228 }
  229 
  230 static int
  231 mfi_transition_firmware(struct mfi_softc *sc)
  232 {
  233         int32_t fw_state, cur_state;
  234         int max_wait, i;
  235 
   236         fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
  237         while (fw_state != MFI_FWSTATE_READY) {
  238                 if (bootverbose)
  239                         device_printf(sc->mfi_dev, "Waiting for firmware to "
  240                         "become ready\n");
  241                 cur_state = fw_state;
  242                 switch (fw_state) {
  243                 case MFI_FWSTATE_FAULT:
  244                         device_printf(sc->mfi_dev, "Firmware fault\n");
  245                         return (ENXIO);
  246                 case MFI_FWSTATE_WAIT_HANDSHAKE:
  247                         MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
  248                         max_wait = 2;
  249                         break;
  250                 case MFI_FWSTATE_OPERATIONAL:
  251                         MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
  252                         max_wait = 10;
  253                         break;
  254                 case MFI_FWSTATE_UNDEFINED:
  255                 case MFI_FWSTATE_BB_INIT:
  256                         max_wait = 2;
  257                         break;
  258                 case MFI_FWSTATE_FW_INIT:
  259                 case MFI_FWSTATE_DEVICE_SCAN:
  260                 case MFI_FWSTATE_FLUSH_CACHE:
  261                         max_wait = 20;
  262                         break;
  263                 default:
   264                         device_printf(sc->mfi_dev, "Unknown firmware state %d\n",
  265                             fw_state);
  266                         return (ENXIO);
  267                 }
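                       /*
                        * Poll the firmware status register every 100ms for
                        * up to max_wait seconds, waiting for the state to
                        * change.
                        */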
  268                 for (i = 0; i < (max_wait * 10); i++) {
  269                         fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
  270                         if (fw_state == cur_state)
  271                                 DELAY(100000);
  272                         else
  273                                 break;
  274                 }
  275                 if (fw_state == cur_state) {
  276                         device_printf(sc->mfi_dev, "firmware stuck in state "
  277                             "%#x\n", fw_state);
  278                         return (ENXIO);
  279                 }
  280         }
  281         return (0);
  282 }
  283 
  284 static void
  285 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  286 {
  287         uint32_t *addr;
  288 
  289         addr = arg;
  290         *addr = segs[0].ds_addr;
  291 }
  292 
  293 int
  294 mfi_attach(struct mfi_softc *sc)
  295 {
  296         uint32_t status;
  297         int error, commsz, framessz, sensesz;
  298         int frames, unit, max_fw_sge;
   299         device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 2.00\n");
  300 
  301         mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
  302         sx_init(&sc->mfi_config_lock, "MFI config");
  303         TAILQ_INIT(&sc->mfi_ld_tqh);
  304         TAILQ_INIT(&sc->mfi_aen_pids);
  305         TAILQ_INIT(&sc->mfi_cam_ccbq);
  306 
  307         mfi_initq_free(sc);
  308         mfi_initq_ready(sc);
  309         mfi_initq_busy(sc);
  310         mfi_initq_bio(sc);
  311 
  312         if (sc->mfi_flags & MFI_FLAGS_1064R) {
  313                 sc->mfi_enable_intr = mfi_enable_intr_xscale;
  314                 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
  315                 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
  316                 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
  317         }
  318         else {
  319                 sc->mfi_enable_intr =  mfi_enable_intr_ppc;
  320                 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
  321                 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
  322                 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
  323         }
  324 
  325 
  326         /* Before we get too far, see if the firmware is working */
  327         if ((error = mfi_transition_firmware(sc)) != 0) {
  328                 device_printf(sc->mfi_dev, "Firmware not in READY state, "
  329                     "error %d\n", error);
  330                 return (ENXIO);
  331         }
  332 
  333         /*
  334          * Get information needed for sizing the contiguous memory for the
  335          * frame pool.  Size down the sgl parameter since we know that
  336          * we will never need more than what's required for MAXPHYS.
  337          * It would be nice if these constants were available at runtime
  338          * instead of compile time.
  339          */
  340         status = sc->mfi_read_fw_status(sc);
  341         sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
  342         max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
  343         sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
  344 
  345         /*
  346          * Create the dma tag for data buffers.  Used both for block I/O
  347          * and for various internal data queries.
  348          */
  349         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  350                                 1, 0,                   /* algnmnt, boundary */
  351                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  352                                 BUS_SPACE_MAXADDR,      /* highaddr */
  353                                 NULL, NULL,             /* filter, filterarg */
  354                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  355                                 sc->mfi_max_sge,        /* nsegments */
  356                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  357                                 BUS_DMA_ALLOCNOW,       /* flags */
  358                                 busdma_lock_mutex,      /* lockfunc */
  359                                 &sc->mfi_io_lock,       /* lockfuncarg */
  360                                 &sc->mfi_buffer_dmat)) {
  361                 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
  362                 return (ENOMEM);
  363         }
  364 
  365         /*
  366          * Allocate DMA memory for the comms queues.  Keep it under 4GB for
  367          * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
   368          * entry, so the calculated size here will be 1 more than
  369          * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
  370          */
  371         commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
  372             sizeof(struct mfi_hwcomms);
  373         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  374                                 1, 0,                   /* algnmnt, boundary */
  375                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  376                                 BUS_SPACE_MAXADDR,      /* highaddr */
  377                                 NULL, NULL,             /* filter, filterarg */
  378                                 commsz,                 /* maxsize */
   379                                 1,                      /* nsegments */
  380                                 commsz,                 /* maxsegsize */
  381                                 0,                      /* flags */
  382                                 NULL, NULL,             /* lockfunc, lockarg */
  383                                 &sc->mfi_comms_dmat)) {
  384                 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
  385                 return (ENOMEM);
  386         }
  387         if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
  388             BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
  389                 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
  390                 return (ENOMEM);
  391         }
  392         bzero(sc->mfi_comms, commsz);
  393         bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
  394             sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
  395 
  396         /*
  397          * Allocate DMA memory for the command frames.  Keep them in the
  398          * lower 4GB for efficiency.  Calculate the size of the commands at
  399          * the same time; each command is one 64 byte frame plus a set of
  400          * additional frames for holding sg lists or other data.
  401          * The assumption here is that the SG list will start at the second
  402          * frame and not use the unused bytes in the first frame.  While this
  403          * isn't technically correct, it simplifies the calculation and allows
  404          * for command frames that might be larger than an mfi_io_frame.
  405          */
  406         if (sizeof(bus_addr_t) == 8) {
  407                 sc->mfi_sge_size = sizeof(struct mfi_sg64);
  408                 sc->mfi_flags |= MFI_FLAGS_SG64;
  409         } else {
  410                 sc->mfi_sge_size = sizeof(struct mfi_sg32);
  411         }
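               /*
                * One 64-byte frame for the command header plus enough
                * additional frames to hold a worst-case S/G list, rounded up.
                */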
  412         frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
  413         sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
  414         framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
  415         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  416                                 64, 0,                  /* algnmnt, boundary */
  417                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  418                                 BUS_SPACE_MAXADDR,      /* highaddr */
  419                                 NULL, NULL,             /* filter, filterarg */
  420                                 framessz,               /* maxsize */
  421                                 1,                      /* nsegments */
  422                                 framessz,               /* maxsegsize */
  423                                 0,                      /* flags */
  424                                 NULL, NULL,             /* lockfunc, lockarg */
  425                                 &sc->mfi_frames_dmat)) {
  426                 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
  427                 return (ENOMEM);
  428         }
  429         if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
  430             BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
  431                 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
  432                 return (ENOMEM);
  433         }
  434         bzero(sc->mfi_frames, framessz);
  435         bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
  436             sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
  437 
  438         /*
  439          * Allocate DMA memory for the frame sense data.  Keep them in the
  440          * lower 4GB for efficiency
  441          */
  442         sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
  443         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  444                                 4, 0,                   /* algnmnt, boundary */
  445                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  446                                 BUS_SPACE_MAXADDR,      /* highaddr */
  447                                 NULL, NULL,             /* filter, filterarg */
  448                                 sensesz,                /* maxsize */
  449                                 1,                      /* nsegments */
  450                                 sensesz,                /* maxsegsize */
  451                                 0,                      /* flags */
  452                                 NULL, NULL,             /* lockfunc, lockarg */
  453                                 &sc->mfi_sense_dmat)) {
  454                 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
  455                 return (ENOMEM);
  456         }
  457         if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
  458             BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
  459                 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
  460                 return (ENOMEM);
  461         }
  462         bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
  463             sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
  464 
  465         if ((error = mfi_alloc_commands(sc)) != 0)
  466                 return (error);
  467 
  468         if ((error = mfi_comms_init(sc)) != 0)
  469                 return (error);
  470 
  471         if ((error = mfi_get_controller_info(sc)) != 0)
  472                 return (error);
  473 
  474         mtx_lock(&sc->mfi_io_lock);
   475         if ((error = mfi_aen_setup(sc, 0)) != 0) {
  476                 mtx_unlock(&sc->mfi_io_lock);
  477                 return (error);
  478         }
  479         mtx_unlock(&sc->mfi_io_lock);
  480 
  481         /*
  482          * Set up the interrupt handler.  XXX This should happen in
  483          * mfi_pci.c
  484          */
  485         sc->mfi_irq_rid = 0;
  486         if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
  487             &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  488                 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
  489                 return (EINVAL);
  490         }
  491         if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
  492             mfi_intr, sc, &sc->mfi_intr)) {
  493                 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
  494                 return (EINVAL);
  495         }
  496 
  497         /* Register a config hook to probe the bus for arrays */
  498         sc->mfi_ich.ich_func = mfi_startup;
  499         sc->mfi_ich.ich_arg = sc;
  500         if (config_intrhook_establish(&sc->mfi_ich) != 0) {
  501                 device_printf(sc->mfi_dev, "Cannot establish configuration "
  502                     "hook\n");
  503                 return (EINVAL);
  504         }
  505 
  506         /*
  507          * Register a shutdown handler.
  508          */
  509         if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
  510             sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
  511                 device_printf(sc->mfi_dev, "Warning: shutdown event "
  512                     "registration failed\n");
  513         }
  514 
  515         /*
  516          * Create the control device for doing management
  517          */
  518         unit = device_get_unit(sc->mfi_dev);
  519         sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
  520             0640, "mfi%d", unit);
  521         if (unit == 0)
  522                 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
  523         if (sc->mfi_cdev != NULL)
  524                 sc->mfi_cdev->si_drv1 = sc;
  525         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  526             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  527             OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
  528             &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
  529         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  530             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  531             OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
  532             &sc->mfi_keep_deleted_volumes, 0,
  533             "Don't detach the mfid device for a busy volume that is deleted");
  534 
  535         device_add_child(sc->mfi_dev, "mfip", -1);
  536         bus_generic_attach(sc->mfi_dev);
  537 
  538         /* Start the timeout watchdog */
  539         callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
  540         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
  541             mfi_timeout, sc);
  542 
  543         return (0);
  544 }
  545 
  546 static int
  547 mfi_alloc_commands(struct mfi_softc *sc)
  548 {
  549         struct mfi_command *cm;
  550         int i, ncmds;
  551 
  552         /*
  553          * XXX Should we allocate all the commands up front, or allocate on
  554          * demand later like 'aac' does?
  555          */
  556         ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
  557         if (bootverbose)
  558                 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
  559                    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
  560 
  561         sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
  562             M_WAITOK | M_ZERO);
  563 
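               /*
                * Carve each command's frame and sense buffer out of the
                * contiguous pools allocated in mfi_attach(), record their
                * bus addresses, and place the command on the free queue.
                */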
  564         for (i = 0; i < ncmds; i++) {
  565                 cm = &sc->mfi_commands[i];
  566                 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
  567                     sc->mfi_cmd_size * i);
  568                 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
  569                     sc->mfi_cmd_size * i;
  570                 cm->cm_frame->header.context = i;
  571                 cm->cm_sense = &sc->mfi_sense[i];
  572                 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
  573                 cm->cm_sc = sc;
  574                 cm->cm_index = i;
  575                 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
  576                     &cm->cm_dmamap) == 0)
  577                         mfi_release_command(cm);
  578                 else
  579                         break;
  580                 sc->mfi_total_cmds++;
  581         }
  582 
  583         return (0);
  584 }
  585 
  586 void
  587 mfi_release_command(struct mfi_command *cm)
  588 {
  589         struct mfi_frame_header *hdr;
  590         uint32_t *hdr_data;
  591 
  592         /*
  593          * Zero out the important fields of the frame, but make sure the
  594          * context field is preserved.  For efficiency, handle the fields
  595          * as 32 bit words.  Clear out the first S/G entry too for safety.
  596          */
  597         hdr = &cm->cm_frame->header;
  598         if (cm->cm_data != NULL && hdr->sg_count) {
  599                 cm->cm_sg->sg32[0].len = 0;
  600                 cm->cm_sg->sg32[0].addr = 0;
  601         }
  602 
  603         hdr_data = (uint32_t *)cm->cm_frame;
  604         hdr_data[0] = 0;        /* cmd, sense_len, cmd_status, scsi_status */
  605         hdr_data[1] = 0;        /* target_id, lun_id, cdb_len, sg_count */
  606         hdr_data[4] = 0;        /* flags, timeout */
  607         hdr_data[5] = 0;        /* data_len */
  608 
  609         cm->cm_extra_frames = 0;
  610         cm->cm_flags = 0;
  611         cm->cm_complete = NULL;
  612         cm->cm_private = NULL;
  613         cm->cm_data = NULL;
  614         cm->cm_sg = 0;
  615         cm->cm_total_frame_size = 0;
  616 
  617         mfi_enqueue_free(cm);
  618 }
  619 
  620 static int
  621 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
  622     void **bufp, size_t bufsize)
  623 {
  624         struct mfi_command *cm;
  625         struct mfi_dcmd_frame *dcmd;
  626         void *buf = NULL;
  627         
  628         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  629         
  630         cm = mfi_dequeue_free(sc);
  631         if (cm == NULL)
  632                 return (EBUSY);
  633 
  634         if ((bufsize > 0) && (bufp != NULL)) {
  635                 if (*bufp == NULL) {
  636                         buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
  637                         if (buf == NULL) {
  638                                 mfi_release_command(cm);
  639                                 return (ENOMEM);
  640                         }
  641                         *bufp = buf;
  642                 } else {
  643                         buf = *bufp;
  644                 }
  645         }
  646 
  647         dcmd =  &cm->cm_frame->dcmd;
  648         bzero(dcmd->mbox, MFI_MBOX_SIZE);
  649         dcmd->header.cmd = MFI_CMD_DCMD;
  650         dcmd->header.timeout = 0;
  651         dcmd->header.flags = 0;
  652         dcmd->header.data_len = bufsize;
  653         dcmd->opcode = opcode;
  654         cm->cm_sg = &dcmd->sgl;
  655         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
  656         cm->cm_flags = 0;
  657         cm->cm_data = buf;
  658         cm->cm_private = buf;
  659         cm->cm_len = bufsize;
  660 
  661         *cmp = cm;
  662         if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
  663                 *bufp = buf;
  664         return (0);
  665 }
  666 
  667 static int
  668 mfi_comms_init(struct mfi_softc *sc)
  669 {
  670         struct mfi_command *cm;
  671         struct mfi_init_frame *init;
  672         struct mfi_init_qinfo *qinfo;
  673         int error;
  674 
  675         mtx_lock(&sc->mfi_io_lock);
   676         if ((cm = mfi_dequeue_free(sc)) == NULL) {
                       mtx_unlock(&sc->mfi_io_lock);
   677                 return (EBUSY);
               }
  678 
  679         /*
  680          * Abuse the SG list area of the frame to hold the init_qinfo
  681          * object;
  682          */
  683         init = &cm->cm_frame->init;
  684         qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
  685 
  686         bzero(qinfo, sizeof(struct mfi_init_qinfo));
  687         qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
  688         qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
  689             offsetof(struct mfi_hwcomms, hw_reply_q);
  690         qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
  691             offsetof(struct mfi_hwcomms, hw_pi);
  692         qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
  693             offsetof(struct mfi_hwcomms, hw_ci);
  694 
  695         init->header.cmd = MFI_CMD_INIT;
  696         init->header.data_len = sizeof(struct mfi_init_qinfo);
  697         init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
  698         cm->cm_data = NULL;
  699         cm->cm_flags = MFI_CMD_POLLED;
  700 
  701         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  702                 device_printf(sc->mfi_dev, "failed to send init command\n");
  703                 mtx_unlock(&sc->mfi_io_lock);
  704                 return (error);
  705         }
  706         mfi_release_command(cm);
  707         mtx_unlock(&sc->mfi_io_lock);
  708 
  709         return (0);
  710 }
  711 
  712 static int
  713 mfi_get_controller_info(struct mfi_softc *sc)
  714 {
  715         struct mfi_command *cm = NULL;
  716         struct mfi_ctrl_info *ci = NULL;
  717         uint32_t max_sectors_1, max_sectors_2;
  718         int error;
  719 
  720         mtx_lock(&sc->mfi_io_lock);
  721         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
  722             (void **)&ci, sizeof(*ci));
  723         if (error)
  724                 goto out;
  725         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
  726 
  727         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  728                 device_printf(sc->mfi_dev, "Failed to get controller info\n");
  729                 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
  730                     MFI_SECTOR_LEN;
  731                 error = 0;
  732                 goto out;
  733         }
  734 
  735         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
  736             BUS_DMASYNC_POSTREAD);
  737         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
  738 
  739         max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
  740         max_sectors_2 = ci->max_request_size;
  741         sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
  742 
  743 out:
  744         if (ci)
  745                 free(ci, M_MFIBUF);
  746         if (cm)
  747                 mfi_release_command(cm);
  748         mtx_unlock(&sc->mfi_io_lock);
  749         return (error);
  750 }
  751 
  752 static int
  753 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
  754 {
  755         struct mfi_command *cm = NULL;
  756         int error;
  757 
  758         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
  759             (void **)log_state, sizeof(**log_state));
  760         if (error)
  761                 goto out;
  762         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
  763 
  764         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  765                 device_printf(sc->mfi_dev, "Failed to get log state\n");
  766                 goto out;
  767         }
  768 
  769         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
  770             BUS_DMASYNC_POSTREAD);
  771         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
  772 
  773 out:
  774         if (cm)
  775                 mfi_release_command(cm);
  776 
  777         return (error);
  778 }
  779 
  780 static int
  781 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
  782 {
  783         struct mfi_evt_log_state *log_state = NULL;
  784         union mfi_evt class_locale;
  785         int error = 0;
  786         uint32_t seq;
  787 
  788         class_locale.members.reserved = 0;
  789         class_locale.members.locale = mfi_event_locale;
  790         class_locale.members.class  = mfi_event_class;
  791 
  792         if (seq_start == 0) {
  793                 error = mfi_get_log_state(sc, &log_state);
  794                 if (error) {
  795                         if (log_state)
  796                                 free(log_state, M_MFIBUF);
  797                         return (error);
  798                 }
  799 
  800                 /*
  801                  * Walk through any events that fired since the last
  802                  * shutdown.
  803                  */
  804                 mfi_parse_entries(sc, log_state->shutdown_seq_num,
  805                     log_state->newest_seq_num);
  806                 seq = log_state->newest_seq_num;
  807         } else
  808                 seq = seq_start;
  809         mfi_aen_register(sc, seq, class_locale.word);
  810         free(log_state, M_MFIBUF);
  811 
  812         return 0;
  813 }
  814 
  815 static int
  816 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
  817 {
  818 
  819         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  820         cm->cm_complete = NULL;
  821 
  822 
  823         /*
  824          * MegaCli can issue a DCMD of 0.  In this case do nothing
  825          * and return 0 to it as status
  826          */
  827         if (cm->cm_frame->dcmd.opcode == 0) {
  828                 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
  829                 cm->cm_error = 0;
  830                 return (cm->cm_error);
  831         }
  832         mfi_enqueue_ready(cm);
  833         mfi_startio(sc);
  834         if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
  835                 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
  836         return (cm->cm_error);
  837 }
  838 
  839 void
  840 mfi_free(struct mfi_softc *sc)
  841 {
  842         struct mfi_command *cm;
  843         int i;
  844 
  845         callout_drain(&sc->mfi_watchdog_callout);
  846 
  847         if (sc->mfi_cdev != NULL)
  848                 destroy_dev(sc->mfi_cdev);
  849 
  850         if (sc->mfi_total_cmds != 0) {
  851                 for (i = 0; i < sc->mfi_total_cmds; i++) {
  852                         cm = &sc->mfi_commands[i];
  853                         bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
  854                 }
  855                 free(sc->mfi_commands, M_MFIBUF);
  856         }
  857 
  858         if (sc->mfi_intr)
  859                 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
  860         if (sc->mfi_irq != NULL)
  861                 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
  862                     sc->mfi_irq);
  863 
  864         if (sc->mfi_sense_busaddr != 0)
  865                 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
  866         if (sc->mfi_sense != NULL)
  867                 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
  868                     sc->mfi_sense_dmamap);
  869         if (sc->mfi_sense_dmat != NULL)
  870                 bus_dma_tag_destroy(sc->mfi_sense_dmat);
  871 
  872         if (sc->mfi_frames_busaddr != 0)
  873                 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
  874         if (sc->mfi_frames != NULL)
  875                 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
  876                     sc->mfi_frames_dmamap);
  877         if (sc->mfi_frames_dmat != NULL)
  878                 bus_dma_tag_destroy(sc->mfi_frames_dmat);
  879 
  880         if (sc->mfi_comms_busaddr != 0)
  881                 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
  882         if (sc->mfi_comms != NULL)
  883                 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
  884                     sc->mfi_comms_dmamap);
  885         if (sc->mfi_comms_dmat != NULL)
  886                 bus_dma_tag_destroy(sc->mfi_comms_dmat);
  887 
  888         if (sc->mfi_buffer_dmat != NULL)
  889                 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
  890         if (sc->mfi_parent_dmat != NULL)
  891                 bus_dma_tag_destroy(sc->mfi_parent_dmat);
  892 
  893         if (mtx_initialized(&sc->mfi_io_lock)) {
  894                 mtx_destroy(&sc->mfi_io_lock);
  895                 sx_destroy(&sc->mfi_config_lock);
  896         }
  897 
  898         return;
  899 }
  900 
  901 static void
  902 mfi_startup(void *arg)
  903 {
  904         struct mfi_softc *sc;
  905 
  906         sc = (struct mfi_softc *)arg;
  907 
  908         config_intrhook_disestablish(&sc->mfi_ich);
  909 
  910         sc->mfi_enable_intr(sc);
  911         sx_xlock(&sc->mfi_config_lock);
  912         mtx_lock(&sc->mfi_io_lock);
  913         mfi_ldprobe(sc);
  914         mtx_unlock(&sc->mfi_io_lock);
  915         sx_xunlock(&sc->mfi_config_lock);
  916 }
  917 
  918 static void
  919 mfi_intr(void *arg)
  920 {
  921         struct mfi_softc *sc;
  922         struct mfi_command *cm;
  923         uint32_t pi, ci, context;
  924 
  925         sc = (struct mfi_softc *)arg;
  926 
  927         if (sc->mfi_check_clear_intr(sc))
  928                 return;
  929 
  930         pi = sc->mfi_comms->hw_pi;
  931         ci = sc->mfi_comms->hw_ci;
  932         mtx_lock(&sc->mfi_io_lock);
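               /*
                * Drain the reply queue.  The firmware advances the producer
                * index (hw_pi) as commands complete; each queue entry holds
                * the context (command index) assigned at allocation time.
                * The consumer index (hw_ci) is written back once the queue
                * has been drained.
                */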
  933         while (ci != pi) {
  934                 context = sc->mfi_comms->hw_reply_q[ci];
  935                 if (context < sc->mfi_max_fw_cmds) {
  936                         cm = &sc->mfi_commands[context];
  937                         mfi_remove_busy(cm);
  938                         cm->cm_error = 0;
  939                         mfi_complete(sc, cm);
  940                 }
  941                 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
  942                         ci = 0;
  943                 }
  944         }
  945 
  946         sc->mfi_comms->hw_ci = ci;
  947 
   948         /* Give deferred I/O a chance to run */
  949         if (sc->mfi_flags & MFI_FLAGS_QFRZN)
  950                 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
  951         mfi_startio(sc);
  952         mtx_unlock(&sc->mfi_io_lock);
  953 
  954         return;
  955 }
  956 
  957 int
  958 mfi_shutdown(struct mfi_softc *sc)
  959 {
  960         struct mfi_dcmd_frame *dcmd;
  961         struct mfi_command *cm;
  962         int error;
  963 
  964         mtx_lock(&sc->mfi_io_lock);
  965         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
  966         if (error) {
  967                 mtx_unlock(&sc->mfi_io_lock);
  968                 return (error);
  969         }
  970 
  971         if (sc->mfi_aen_cm != NULL)
  972                 mfi_abort(sc, sc->mfi_aen_cm);
  973 
  974         dcmd = &cm->cm_frame->dcmd;
  975         dcmd->header.flags = MFI_FRAME_DIR_NONE;
  976         cm->cm_flags = MFI_CMD_POLLED;
  977         cm->cm_data = NULL;
  978 
  979         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  980                 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
  981         }
  982 
  983         mfi_release_command(cm);
  984         mtx_unlock(&sc->mfi_io_lock);
  985         return (error);
  986 }
  987 
  988 static void
  989 mfi_ldprobe(struct mfi_softc *sc)
  990 {
  991         struct mfi_frame_header *hdr;
  992         struct mfi_command *cm = NULL;
  993         struct mfi_ld_list *list = NULL;
  994         struct mfi_disk *ld;
  995         int error, i;
  996 
  997         sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
  998         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  999 
 1000         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
 1001             (void **)&list, sizeof(*list));
 1002         if (error)
 1003                 goto out;
 1004 
 1005         cm->cm_flags = MFI_CMD_DATAIN;
 1006         if (mfi_wait_command(sc, cm) != 0) {
 1007                 device_printf(sc->mfi_dev, "Failed to get device listing\n");
 1008                 goto out;
 1009         }
 1010 
 1011         hdr = &cm->cm_frame->header;
 1012         if (hdr->cmd_status != MFI_STAT_OK) {
 1013                 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
 1014                     hdr->cmd_status);
 1015                 goto out;
 1016         }
 1017 
 1018         for (i = 0; i < list->ld_count; i++) {
 1019                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1020                         if (ld->ld_id == list->ld_list[i].ld.v.target_id)
 1021                                 goto skip_add;
 1022                 }
 1023                 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
 1024         skip_add:;
 1025         }
 1026 out:
 1027         if (list)
 1028                 free(list, M_MFIBUF);
 1029         if (cm)
 1030                 mfi_release_command(cm);
 1031 
 1032         return;
 1033 }
 1034 
 1035 /*
 1036  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 1037  * the bits in 24-31 are all set, then it is the number of seconds since
 1038  * boot.
 1039  */
 1040 static const char *
 1041 format_timestamp(uint32_t timestamp)
 1042 {
 1043         static char buffer[32];
 1044 
 1045         if ((timestamp & 0xff000000) == 0xff000000)
 1046                 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
 1047                     0x00ffffff);
 1048         else
 1049                 snprintf(buffer, sizeof(buffer), "%us", timestamp);
 1050         return (buffer);
 1051 }
 1052 
 1053 static const char *
 1054 format_class(int8_t class)
 1055 {
 1056         static char buffer[6];
 1057 
 1058         switch (class) {
 1059         case MFI_EVT_CLASS_DEBUG:
 1060                 return ("debug");
 1061         case MFI_EVT_CLASS_PROGRESS:
 1062                 return ("progress");
 1063         case MFI_EVT_CLASS_INFO:
 1064                 return ("info");
 1065         case MFI_EVT_CLASS_WARNING:
 1066                 return ("WARN");
 1067         case MFI_EVT_CLASS_CRITICAL:
 1068                 return ("CRIT");
 1069         case MFI_EVT_CLASS_FATAL:
 1070                 return ("FATAL");
 1071         case MFI_EVT_CLASS_DEAD:
 1072                 return ("DEAD");
 1073         default:
 1074                 snprintf(buffer, sizeof(buffer), "%d", class);
 1075                 return (buffer);
 1076         }
 1077 }
 1078 
 1079 static void
 1080 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
 1081 {
 1082 
 1083         device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
 1084             format_timestamp(detail->time), detail->class.members.locale,
 1085             format_class(detail->class.members.class), detail->description);
 1086 }
 1087 
 1088 static int
 1089 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
 1090 {
 1091         struct mfi_command *cm;
 1092         struct mfi_dcmd_frame *dcmd;
 1093         union mfi_evt current_aen, prior_aen;
 1094         struct mfi_evt_detail *ed = NULL;
 1095         int error = 0;
 1096 
 1097         current_aen.word = locale;
 1098         if (sc->mfi_aen_cm != NULL) {
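                       /*
                        * An event listener is already outstanding.  If it
                        * already covers this class and every requested locale
                        * bit, reuse it; otherwise widen the filter and abort
                        * the old command so a new one can be issued below.
                        */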
 1099                 prior_aen.word =
 1100                     ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
 1101                 if (prior_aen.members.class <= current_aen.members.class &&
 1102                     !((prior_aen.members.locale & current_aen.members.locale)
 1103                     ^current_aen.members.locale)) {
 1104                         return (0);
 1105                 } else {
 1106                         prior_aen.members.locale |= current_aen.members.locale;
 1107                         if (prior_aen.members.class
 1108                             < current_aen.members.class)
 1109                                 current_aen.members.class =
 1110                                     prior_aen.members.class;
 1111                         mfi_abort(sc, sc->mfi_aen_cm);
 1112                 }
 1113         }
 1114 
 1115         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
 1116             (void **)&ed, sizeof(*ed));
 1117         if (error) {
 1118                 goto out;
 1119         }
 1120 
 1121         dcmd = &cm->cm_frame->dcmd;
 1122         ((uint32_t *)&dcmd->mbox)[0] = seq;
 1123         ((uint32_t *)&dcmd->mbox)[1] = locale;
 1124         cm->cm_flags = MFI_CMD_DATAIN;
 1125         cm->cm_complete = mfi_aen_complete;
 1126 
 1127         sc->mfi_aen_cm = cm;
 1128 
 1129         mfi_enqueue_ready(cm);
 1130         mfi_startio(sc);
 1131 
 1132 out:
 1133         return (error);
 1134 }
 1135 
 1136 static void
 1137 mfi_aen_complete(struct mfi_command *cm)
 1138 {
 1139         struct mfi_frame_header *hdr;
 1140         struct mfi_softc *sc;
 1141         struct mfi_evt_detail *detail;
 1142         struct mfi_aen *mfi_aen_entry, *tmp;
 1143         int seq = 0, aborted = 0;
 1144 
 1145         sc = cm->cm_sc;
 1146         hdr = &cm->cm_frame->header;
 1147 
 1148         if (sc->mfi_aen_cm == NULL)
 1149                 return;
 1150 
 1151         if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
 1152                 sc->mfi_aen_cm->cm_aen_abort = 0;
 1153                 aborted = 1;
 1154         } else {
 1155                 sc->mfi_aen_triggered = 1;
 1156                 if (sc->mfi_poll_waiting) {
 1157                         sc->mfi_poll_waiting = 0;
 1158                         selwakeup(&sc->mfi_select);
 1159                 }
 1160                 detail = cm->cm_data;
 1161                 /*
 1162                  * XXX If this function is too expensive or is recursive, then
 1163                  * events should be put onto a queue and processed later.
 1164                  */
 1165                 mfi_decode_evt(sc, detail);
 1166                 seq = detail->seq + 1;
 1167                 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
 1168                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 1169                             aen_link);
 1170                         PROC_LOCK(mfi_aen_entry->p);
 1171                         psignal(mfi_aen_entry->p, SIGIO);
 1172                         PROC_UNLOCK(mfi_aen_entry->p);
 1173                         free(mfi_aen_entry, M_MFIBUF);
 1174                 }
 1175         }
 1176 
 1177         free(cm->cm_data, M_MFIBUF);
 1178         sc->mfi_aen_cm = NULL;
 1179         wakeup(&sc->mfi_aen_cm);
 1180         mfi_release_command(cm);
 1181 
 1182         /* set it up again so the driver can catch more events */
 1183         if (!aborted) {
 1184                 mfi_aen_setup(sc, seq);
 1185         }
 1186 }
 1187 
 1188 #define MAX_EVENTS 15
 1189 
 1190 static int
 1191 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
 1192 {
 1193         struct mfi_command *cm;
 1194         struct mfi_dcmd_frame *dcmd;
 1195         struct mfi_evt_list *el;
 1196         union mfi_evt class_locale;
 1197         int error, i, seq, size;
 1198 
 1199         class_locale.members.reserved = 0;
 1200         class_locale.members.locale = mfi_event_locale;
 1201         class_locale.members.class  = mfi_event_class;
 1202 
 1203         size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
 1204                 * (MAX_EVENTS - 1);
 1205         el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
 1206         if (el == NULL)
 1207                 return (ENOMEM);
 1208 
 1209         for (seq = start_seq;;) {
 1210                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 1211                         free(el, M_MFIBUF);
 1212                         return (EBUSY);
 1213                 }
 1214 
 1215                 dcmd = &cm->cm_frame->dcmd;
 1216                 bzero(dcmd->mbox, MFI_MBOX_SIZE);
 1217                 dcmd->header.cmd = MFI_CMD_DCMD;
 1218                 dcmd->header.timeout = 0;
 1219                 dcmd->header.data_len = size;
 1220                 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
 1221                 ((uint32_t *)&dcmd->mbox)[0] = seq;
 1222                 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
 1223                 cm->cm_sg = &dcmd->sgl;
 1224                 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 1225                 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 1226                 cm->cm_data = el;
 1227                 cm->cm_len = size;
 1228 
 1229                 if ((error = mfi_mapcmd(sc, cm)) != 0) {
 1230                         device_printf(sc->mfi_dev,
 1231                             "Failed to get controller entries\n");
 1232                         mfi_release_command(cm);
 1233                         break;
 1234                 }
 1235 
 1236                 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1237                     BUS_DMASYNC_POSTREAD);
 1238                 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1239 
 1240                 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
 1241                         mfi_release_command(cm);
 1242                         break;
 1243                 }
 1244                 if (dcmd->header.cmd_status != MFI_STAT_OK) {
 1245                         device_printf(sc->mfi_dev,
 1246                             "Error %d fetching controller entries\n",
 1247                             dcmd->header.cmd_status);
 1248                         mfi_release_command(cm);
 1249                         break;
 1250                 }
 1251                 mfi_release_command(cm);
 1252 
 1253                 for (i = 0; i < el->count; i++) {
 1254                         /*
 1255                          * If this event is newer than 'stop_seq' then
 1256                          * break out of the loop.  Note that the log
 1257                          * is a circular buffer so we have to handle
 1258                          * the case that our stop point is earlier in
 1259                          * the buffer than our start point.
 1260                          */
 1261                         if (el->event[i].seq >= stop_seq) {
 1262                                 if (start_seq <= stop_seq)
 1263                                         break;
 1264                                 else if (el->event[i].seq < start_seq)
 1265                                         break;
 1266                         }
 1267                         mfi_decode_evt(sc, &el->event[i]);
 1268                 }
 1269                 seq = el->event[el->count - 1].seq + 1;
 1270         }
 1271 
 1272         free(el, M_MFIBUF);
 1273         return (0);
 1274 }
 1275 
 1276 static int
 1277 mfi_add_ld(struct mfi_softc *sc, int id)
 1278 {
 1279         struct mfi_command *cm;
 1280         struct mfi_dcmd_frame *dcmd = NULL;
 1281         struct mfi_ld_info *ld_info = NULL;
 1282         int error;
 1283 
 1284         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1285 
 1286         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
 1287             (void **)&ld_info, sizeof(*ld_info));
 1288         if (error) {
 1289                 device_printf(sc->mfi_dev,
 1290                     "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
 1291                 if (ld_info)
 1292                         free(ld_info, M_MFIBUF);
 1293                 return (error);
 1294         }
 1295         cm->cm_flags = MFI_CMD_DATAIN;
 1296         dcmd = &cm->cm_frame->dcmd;
 1297         dcmd->mbox[0] = id;
 1298         if (mfi_wait_command(sc, cm) != 0) {
 1299                 device_printf(sc->mfi_dev,
 1300                     "Failed to get logical drive: %d\n", id);
 1301                 free(ld_info, M_MFIBUF);
 1302                 return (0);
 1303         }
 1304 
 1305         mfi_add_ld_complete(cm);
 1306         return (0);
 1307 }
 1308 
 1309 static void
 1310 mfi_add_ld_complete(struct mfi_command *cm)
 1311 {
 1312         struct mfi_frame_header *hdr;
 1313         struct mfi_ld_info *ld_info;
 1314         struct mfi_softc *sc;
 1315         device_t child;
 1316 
 1317         sc = cm->cm_sc;
 1318         hdr = &cm->cm_frame->header;
 1319         ld_info = cm->cm_private;
 1320 
 1321         if (hdr->cmd_status != MFI_STAT_OK) {
 1322                 free(ld_info, M_MFIBUF);
 1323                 mfi_release_command(cm);
 1324                 return;
 1325         }
 1326         mfi_release_command(cm);
 1327 
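               /*
                * The new-bus calls below are made under Giant, so drop the
                * per-controller I/O lock first and reacquire it on the way
                * out.
                */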
 1328         mtx_unlock(&sc->mfi_io_lock);
 1329         mtx_lock(&Giant);
 1330         if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
 1331                 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
 1332                 free(ld_info, M_MFIBUF);
 1333                 mtx_unlock(&Giant);
 1334                 mtx_lock(&sc->mfi_io_lock);
 1335                 return;
 1336         }
 1337 
 1338         device_set_ivars(child, ld_info);
 1339         device_set_desc(child, "MFI Logical Disk");
 1340         bus_generic_attach(sc->mfi_dev);
 1341         mtx_unlock(&Giant);
 1342         mtx_lock(&sc->mfi_io_lock);
 1343 }
 1344 
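      /*
       * Turn the next queued bio into an MFI_CMD_LD_READ or MFI_CMD_LD_WRITE
       * frame.  Returns NULL when either a free command or a pending bio is
       * unavailable.
       */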
 1345 static struct mfi_command *
 1346 mfi_bio_command(struct mfi_softc *sc)
 1347 {
 1348         struct mfi_io_frame *io;
 1349         struct mfi_command *cm;
 1350         struct bio *bio;
 1351         int flags, blkcount;
 1352 
 1353         if ((cm = mfi_dequeue_free(sc)) == NULL)
 1354                 return (NULL);
 1355 
 1356         if ((bio = mfi_dequeue_bio(sc)) == NULL) {
 1357                 mfi_release_command(cm);
 1358                 return (NULL);
 1359         }
 1360 
 1361         io = &cm->cm_frame->io;
 1362         switch (bio->bio_cmd & 0x03) {
 1363         case BIO_READ:
 1364                 io->header.cmd = MFI_CMD_LD_READ;
 1365                 flags = MFI_CMD_DATAIN;
 1366                 break;
 1367         case BIO_WRITE:
 1368                 io->header.cmd = MFI_CMD_LD_WRITE;
 1369                 flags = MFI_CMD_DATAOUT;
 1370                 break;
 1371         default:
 1372                 panic("Invalid bio command");
 1373         }
 1374 
 1375         /* Cheat with the sector length to avoid a non-constant division */
 1376         blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
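              /*
               * For illustration, assuming the usual 512-byte MFI_SECTOR_LEN:
               * a 4096-byte bio gives blkcount = (4096 + 511) / 512 = 8.
               */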
 1377         io->header.target_id = (uintptr_t)bio->bio_driver1;
 1378         io->header.timeout = 0;
 1379         io->header.flags = 0;
 1380         io->header.sense_len = MFI_SENSE_LEN;
 1381         io->header.data_len = blkcount;
 1382         io->sense_addr_lo = cm->cm_sense_busaddr;
 1383         io->sense_addr_hi = 0;
 1384         io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
 1385         io->lba_lo = bio->bio_pblkno & 0xffffffff;
 1386         cm->cm_complete = mfi_bio_complete;
 1387         cm->cm_private = bio;
 1388         cm->cm_data = bio->bio_data;
 1389         cm->cm_len = bio->bio_bcount;
 1390         cm->cm_sg = &io->sgl;
 1391         cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
 1392         cm->cm_flags = flags;
 1393         return (cm);
 1394 }
 1395 
 1396 static void
 1397 mfi_bio_complete(struct mfi_command *cm)
 1398 {
 1399         struct bio *bio;
 1400         struct mfi_frame_header *hdr;
 1401         struct mfi_softc *sc;
 1402 
 1403         bio = cm->cm_private;
 1404         hdr = &cm->cm_frame->header;
 1405         sc = cm->cm_sc;
 1406 
 1407         if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
 1408                 bio->bio_flags |= BIO_ERROR;
 1409                 bio->bio_error = EIO;
 1410                 device_printf(sc->mfi_dev, "I/O error, status= %d "
 1411                     "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
 1412                 mfi_print_sense(cm->cm_sc, cm->cm_sense);
 1413         } else if (cm->cm_error != 0) {
 1414                 bio->bio_flags |= BIO_ERROR;
 1415         }
 1416 
 1417         mfi_release_command(cm);
 1418         mfi_disk_complete(bio);
 1419 }
 1420 
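      /*
       * Drain pending work: first commands already on the ready queue, then
       * queued CAM ccbs, then bios, mapping and issuing each until the
       * controller is resource-frozen (MFI_FLAGS_QFRZN) or no work remains.
       */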
 1421 void
 1422 mfi_startio(struct mfi_softc *sc)
 1423 {
 1424         struct mfi_command *cm;
 1425         struct ccb_hdr *ccbh;
 1426 
 1427         for (;;) {
 1428                 /* Don't bother if we're short on resources */
 1429                 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
 1430                         break;
 1431 
 1432                 /* Try a command that has already been prepared */
 1433                 cm = mfi_dequeue_ready(sc);
 1434 
 1435                 if (cm == NULL) {
 1436                         if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
 1437                                 cm = sc->mfi_cam_start(ccbh);
 1438                 }
 1439 
 1440                 /* Nope, so look for work on the bioq */
 1441                 if (cm == NULL)
 1442                         cm = mfi_bio_command(sc);
 1443 
 1444                 /* No work available, so exit */
 1445                 if (cm == NULL)
 1446                         break;
 1447 
 1448                 /* Send the command to the controller */
 1449                 if (mfi_mapcmd(sc, cm) != 0) {
 1450                         mfi_requeue_ready(cm);
 1451                         break;
 1452                 }
 1453         }
 1454 }
 1455 
 1456 static int
 1457 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
 1458 {
 1459         int error, polled;
 1460 
 1461         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1462 
 1463         if (cm->cm_data != NULL) {
 1464                 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
 1465                 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1466                     cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
 1467                 if (error == EINPROGRESS) {
 1468                         sc->mfi_flags |= MFI_FLAGS_QFRZN;
 1469                         return (0);
 1470                 }
 1471         } else {
 1472                 error = mfi_send_frame(sc, cm);
 1473         }
 1474 
 1475         return (error);
 1476 }
 1477 
 1478 static void
 1479 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1480 {
 1481         struct mfi_frame_header *hdr;
 1482         struct mfi_command *cm;
 1483         union mfi_sgl *sgl;
 1484         struct mfi_softc *sc;
 1485         int i, dir;
 1486 
 1487         cm = (struct mfi_command *)arg;
 1488         sc = cm->cm_sc;
 1489         hdr = &cm->cm_frame->header;
 1490         sgl = cm->cm_sg;
 1491 
 1492         if (error) {
 1493                 printf("error %d in callback\n", error);
 1494                 cm->cm_error = error;
 1495                 mfi_complete(sc, cm);
 1496                 return;
 1497         }
 1498 
 1499         if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
 1500                 for (i = 0; i < nsegs; i++) {
 1501                         sgl->sg32[i].addr = segs[i].ds_addr;
 1502                         sgl->sg32[i].len = segs[i].ds_len;
 1503                 }
 1504         } else {
 1505                 for (i = 0; i < nsegs; i++) {
 1506                         sgl->sg64[i].addr = segs[i].ds_addr;
 1507                         sgl->sg64[i].len = segs[i].ds_len;
 1508                 }
 1509                 hdr->flags |= MFI_FRAME_SGL64;
 1510         }
 1511         hdr->sg_count = nsegs;
 1512 
 1513         dir = 0;
 1514         if (cm->cm_flags & MFI_CMD_DATAIN) {
 1515                 dir |= BUS_DMASYNC_PREREAD;
 1516                 hdr->flags |= MFI_FRAME_DIR_READ;
 1517         }
 1518         if (cm->cm_flags & MFI_CMD_DATAOUT) {
 1519                 dir |= BUS_DMASYNC_PREWRITE;
 1520                 hdr->flags |= MFI_FRAME_DIR_WRITE;
 1521         }
 1522         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
 1523         cm->cm_flags |= MFI_CMD_MAPPED;
 1524 
 1525         /*
 1526          * Account for the extra frames consumed by the SG list.  The
 1527          * command is assumed to occupy at least one frame, so the
 1528          * (size - 1) / MFI_FRAME_SIZE division below already yields
 1529          * the number of additional frames without a modulo adjustment.
 1530          */
 1531         cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
 1532         cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
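              /*
               * Worked example (sizes assumed for illustration): with a
               * 40-byte I/O frame, 8-byte 32-bit SG entries, and 10 segments,
               * the total is 40 + 8 * 10 = 120 bytes, so
               * cm_extra_frames = (120 - 1) / 64 = 1 extra 64-byte frame.
               */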
 1533 
 1534         mfi_send_frame(sc, cm);
 1535 
 1536         return;
 1537 }
 1538 
 1539 static int
 1540 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
 1541 {
 1542         struct mfi_frame_header *hdr;
 1543         int tm = MFI_POLL_TIMEOUT_SECS * 1000;
 1544 
 1545         hdr = &cm->cm_frame->header;
 1546 
 1547         if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
 1548                 cm->cm_timestamp = time_uptime;
 1549                 mfi_enqueue_busy(cm);
 1550         } else {
 1551                 hdr->cmd_status = 0xff;
 1552                 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 1553         }
 1554 
 1555         /*
 1556          * The bus address of the command is aligned on a 64-byte boundary,
 1557          * leaving the low-order 6 bits zero.  For whatever reason, the
 1558          * hardware wants the address shifted right by three, leaving just
 1559          * 3 zero bits.  These three bits are then used as a prefetching
 1560          * hint for the hardware to predict how many frames need to be
 1561          * fetched across the bus.  If a command has more than 8 frames
 1562          * then the 3 bits are set to 0x7 and the firmware uses other
 1563          * information in the command to determine the total amount to fetch.
 1564          * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
 1565          * is enough for both 32bit and 64bit systems.
 1566          */
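              /*
               * Illustrative arithmetic (assumed values): for a frame at bus
               * address 0x1000040 with 2 extra frames, the controller-specific
               * mfi_issue_cmd routine would hand the hardware roughly
               * (0x1000040 >> 3) | 2 = 0x20000a.
               */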
 1567         if (cm->cm_extra_frames > 7)
 1568                 cm->cm_extra_frames = 7;
 1569 
 1570         sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
 1571 
 1572         if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
 1573                 return (0);
 1574 
 1575         /* This is a polled command, so busy-wait for it to complete. */
 1576         while (hdr->cmd_status == 0xff) {
 1577                 DELAY(1000);
 1578                 tm -= 1;
 1579                 if (tm <= 0)
 1580                         break;
 1581         }
 1582 
 1583         if (hdr->cmd_status == 0xff) {
 1584                 device_printf(sc->mfi_dev, "Frame %p timed out "
 1585                               "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
 1586                 return (ETIMEDOUT);
 1587         }
 1588 
 1589         return (0);
 1590 }
 1591 
 1592 static void
 1593 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
 1594 {
 1595         int dir;
 1596 
 1597         if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
 1598                 dir = 0;
 1599                 if (cm->cm_flags & MFI_CMD_DATAIN)
 1600                         dir |= BUS_DMASYNC_POSTREAD;
 1601                 if (cm->cm_flags & MFI_CMD_DATAOUT)
 1602                         dir |= BUS_DMASYNC_POSTWRITE;
 1603 
 1604                 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
 1605                 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1606                 cm->cm_flags &= ~MFI_CMD_MAPPED;
 1607         }
 1608 
 1609         cm->cm_flags |= MFI_CMD_COMPLETED;
 1610 
 1611         if (cm->cm_complete != NULL)
 1612                 cm->cm_complete(cm);
 1613         else
 1614                 wakeup(cm);
 1615 }
 1616 
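      /*
       * Issue a polled MFI_CMD_ABORT frame for an outstanding command (in
       * practice the async event notification command) and wait up to about
       * 25 seconds for the aborted command to be torn down.
       */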
 1617 static int
 1618 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
 1619 {
 1620         struct mfi_command *cm;
 1621         struct mfi_abort_frame *abort;
 1622         int i = 0;
 1623 
 1624         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1625 
 1626         if ((cm = mfi_dequeue_free(sc)) == NULL) {
 1627                 return (EBUSY);
 1628         }
 1629 
 1630         abort = &cm->cm_frame->abort;
 1631         abort->header.cmd = MFI_CMD_ABORT;
 1632         abort->header.flags = 0;
 1633         abort->abort_context = cm_abort->cm_frame->header.context;
 1634         abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
 1635         abort->abort_mfi_addr_hi = 0;
 1636         cm->cm_data = NULL;
 1637         cm->cm_flags = MFI_CMD_POLLED;
 1638 
 1639         sc->mfi_aen_cm->cm_aen_abort = 1;
 1640         mfi_mapcmd(sc, cm);
 1641         mfi_release_command(cm);
 1642 
 1643         while (i < 5 && sc->mfi_aen_cm != NULL) {
 1644                 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
 1645                 i++;
 1646         }
 1647 
 1648         return (0);
 1649 }
 1650 
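      /*
       * Write a run of blocks with a single polled MFI_CMD_LD_WRITE frame.
       * Because the command is polled rather than slept on, this path is
       * usable from contexts that cannot sleep, such as kernel crash dumps.
       */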
 1651 int
 1652 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
 1653 {
 1654         struct mfi_command *cm;
 1655         struct mfi_io_frame *io;
 1656         int error;
 1657 
 1658         if ((cm = mfi_dequeue_free(sc)) == NULL)
 1659                 return (EBUSY);
 1660 
 1661         io = &cm->cm_frame->io;
 1662         io->header.cmd = MFI_CMD_LD_WRITE;
 1663         io->header.target_id = id;
 1664         io->header.timeout = 0;
 1665         io->header.flags = 0;
 1666         io->header.sense_len = MFI_SENSE_LEN;
 1667         io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
 1668         io->sense_addr_lo = cm->cm_sense_busaddr;
 1669         io->sense_addr_hi = 0;
 1670         io->lba_hi = (lba & 0xffffffff00000000) >> 32;
 1671         io->lba_lo = lba & 0xffffffff;
 1672         cm->cm_data = virt;
 1673         cm->cm_len = len;
 1674         cm->cm_sg = &io->sgl;
 1675         cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
 1676         cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
 1677 
 1678         error = mfi_mapcmd(sc, cm);
 1679         bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
 1680             BUS_DMASYNC_POSTWRITE);
 1681         bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1682         mfi_release_command(cm);
 1683 
 1684         return (error);
 1685 }
 1686 
 1687 static int
 1688 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 1689 {
 1690         struct mfi_softc *sc;
 1691         int error;
 1692 
 1693         sc = dev->si_drv1;
 1694 
 1695         mtx_lock(&sc->mfi_io_lock);
 1696         if (sc->mfi_detaching)
 1697                 error = ENXIO;
 1698         else {
 1699                 sc->mfi_flags |= MFI_FLAGS_OPEN;
 1700                 error = 0;
 1701         }
 1702         mtx_unlock(&sc->mfi_io_lock);
 1703 
 1704         return (error);
 1705 }
 1706 
 1707 static int
 1708 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 1709 {
 1710         struct mfi_softc *sc;
 1711         struct mfi_aen *mfi_aen_entry, *tmp;
 1712 
 1713         sc = dev->si_drv1;
 1714 
 1715         mtx_lock(&sc->mfi_io_lock);
 1716         sc->mfi_flags &= ~MFI_FLAGS_OPEN;
 1717 
 1718         TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
 1719                 if (mfi_aen_entry->p == curproc) {
 1720                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 1721                             aen_link);
 1722                         free(mfi_aen_entry, M_MFIBUF);
 1723                 }
 1724         }
 1725         mtx_unlock(&sc->mfi_io_lock);
 1726         return (0);
 1727 }
 1728 
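      /*
       * Serialize configuration-changing DCMDs.  Returns 1 after taking the
       * config sx lock for LD_DELETE, CFG_ADD, and CFG_CLEAR so the caller
       * can pass that value to mfi_config_unlock(); returns 0 for opcodes
       * that need no serialization.
       */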
 1729 static int
 1730 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
 1731 {
 1732 
 1733         switch (opcode) {
 1734         case MFI_DCMD_LD_DELETE:
 1735         case MFI_DCMD_CFG_ADD:
 1736         case MFI_DCMD_CFG_CLEAR:
 1737                 sx_xlock(&sc->mfi_config_lock);
 1738                 return (1);
 1739         default:
 1740                 return (0);
 1741         }
 1742 }
 1743 
 1744 static void
 1745 mfi_config_unlock(struct mfi_softc *sc, int locked)
 1746 {
 1747 
 1748         if (locked)
 1749                 sx_xunlock(&sc->mfi_config_lock);
 1750 }
 1751 
 1752 /* Perform pre-issue checks on commands from userland and possibly veto them. */
 1753 static int
 1754 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
 1755 {
 1756         struct mfi_disk *ld, *ld2;
 1757         int error;
 1758 
 1759         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1760         error = 0;
 1761         switch (cm->cm_frame->dcmd.opcode) {
 1762         case MFI_DCMD_LD_DELETE:
 1763                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1764                         if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
 1765                                 break;
 1766                 }
 1767                 if (ld == NULL)
 1768                         error = ENOENT;
 1769                 else
 1770                         error = mfi_disk_disable(ld);
 1771                 break;
 1772         case MFI_DCMD_CFG_CLEAR:
 1773                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1774                         error = mfi_disk_disable(ld);
 1775                         if (error)
 1776                                 break;
 1777                 }
 1778                 if (error) {
 1779                         TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
 1780                                 if (ld2 == ld)
 1781                                         break;
 1782                                 mfi_disk_enable(ld2);
 1783                         }
 1784                 }
 1785                 break;
 1786         default:
 1787                 break;
 1788         }
 1789         return (error);
 1790 }
 1791 
 1792 /* Perform post-issue checks on commands from userland. */
 1793 static void
 1794 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
 1795 {
 1796         struct mfi_disk *ld, *ldn;
 1797 
 1798         switch (cm->cm_frame->dcmd.opcode) {
 1799         case MFI_DCMD_LD_DELETE:
 1800                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1801                         if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
 1802                                 break;
 1803                 }
 1804                 KASSERT(ld != NULL, ("volume disappeared"));
 1805                 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
 1806                         mtx_unlock(&sc->mfi_io_lock);
 1807                         mtx_lock(&Giant);
 1808                         device_delete_child(sc->mfi_dev, ld->ld_dev);
 1809                         mtx_unlock(&Giant);
 1810                         mtx_lock(&sc->mfi_io_lock);
 1811                 } else
 1812                         mfi_disk_enable(ld);
 1813                 break;
 1814         case MFI_DCMD_CFG_CLEAR:
 1815                 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
 1816                         mtx_unlock(&sc->mfi_io_lock);
 1817                         mtx_lock(&Giant);
 1818                         TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
 1819                                 device_delete_child(sc->mfi_dev, ld->ld_dev);
 1820                         }
 1821                         mtx_unlock(&Giant);
 1822                         mtx_lock(&sc->mfi_io_lock);
 1823                 } else {
 1824                         TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
 1825                                 mfi_disk_enable(ld);
 1826                 }
 1827                 break;
 1828         case MFI_DCMD_CFG_ADD:
 1829                 mfi_ldprobe(sc);
 1830                 break;
 1831         case MFI_DCMD_CFG_FOREIGN_IMPORT:
 1832                 mfi_ldprobe(sc);
 1833                 break;
 1834         }
 1835 }
 1836 
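      /*
       * Handle a userland DCMD passthrough: copy in the caller's frame and
       * optional buffer, run it through the normal command path with both
       * DATAIN and DATAOUT set (the direction is unknown here), then copy
       * the frame and buffer back out.
       */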
 1837 static int
 1838 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
 1839 {
 1840         struct mfi_command *cm;
 1841         struct mfi_dcmd_frame *dcmd;
 1842         void *ioc_buf = NULL;
 1843         uint32_t context;
 1844         int error = 0, locked;
 1845 
 1846 
 1847         if (ioc->buf_size > 0) {
 1848                 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
 1849                 if (ioc_buf == NULL) {
 1850                         return (ENOMEM);
 1851                 }
 1852                 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
 1853                 if (error) {
 1854                         device_printf(sc->mfi_dev, "failed to copyin\n");
 1855                         free(ioc_buf, M_MFIBUF);
 1856                         return (error);
 1857                 }
 1858         }
 1859 
 1860         locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
 1861 
 1862         mtx_lock(&sc->mfi_io_lock);
 1863         while ((cm = mfi_dequeue_free(sc)) == NULL)
 1864                 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
 1865 
 1866         /* Save context for later */
 1867         context = cm->cm_frame->header.context;
 1868 
 1869         dcmd = &cm->cm_frame->dcmd;
 1870         bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
 1871 
 1872         cm->cm_sg = &dcmd->sgl;
 1873         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 1874         cm->cm_data = ioc_buf;
 1875         cm->cm_len = ioc->buf_size;
 1876 
 1877         /* restore context */
 1878         cm->cm_frame->header.context = context;
 1879 
 1880         /* Cheat since we don't know if we're writing or reading */
 1881         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
 1882 
 1883         error = mfi_check_command_pre(sc, cm);
 1884         if (error)
 1885                 goto out;
 1886 
 1887         error = mfi_wait_command(sc, cm);
 1888         if (error) {
 1889                 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
 1890                 goto out;
 1891         }
 1892         bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
 1893         mfi_check_command_post(sc, cm);
 1894 out:
 1895         mfi_release_command(cm);
 1896         mtx_unlock(&sc->mfi_io_lock);
 1897         mfi_config_unlock(sc, locked);
 1898         if (ioc->buf_size > 0)
 1899                 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
 1900         if (ioc_buf)
 1901                 free(ioc_buf, M_MFIBUF);
 1902         return (error);
 1903 }
 1904 
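      /*
       * PTRIN converts the 32-bit pointer fields carried in the compat
       * (32-bit) ioctl structures into kernel pointers on amd64; on other
       * platforms it is a no-op.  For example (illustrative only):
       *
       *      void *base = PTRIN(ioc32->mfi_sgl[0].iov_base);
       */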
 1905 #ifdef __amd64__
 1906 #define PTRIN(p)                ((void *)(uintptr_t)(p))
 1907 #else
 1908 #define PTRIN(p)                (p)
 1909 #endif
 1910 
 1911 static int
 1912 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
 1913 {
 1914         struct mfi_softc *sc;
 1915         union mfi_statrequest *ms;
 1916         struct mfi_ioc_packet *ioc;
 1917 #ifdef __amd64__
 1918         struct mfi_ioc_packet32 *ioc32;
 1919 #endif
 1920         struct mfi_ioc_aen *aen;
 1921         struct mfi_command *cm = NULL;
 1922         uint32_t context;
 1923         union mfi_sense_ptr sense_ptr;
 1924         uint8_t *data = NULL, *temp;
 1925         int i;
 1926         struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
 1927 #ifdef __amd64__
 1928         struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
 1929         struct mfi_ioc_passthru iop_swab;
 1930 #endif
 1931         int error, locked;
 1932 
 1933         sc = dev->si_drv1;
 1934         error = 0;
 1935 
 1936         switch (cmd) {
 1937         case MFIIO_STATS:
 1938                 ms = (union mfi_statrequest *)arg;
 1939                 switch (ms->ms_item) {
 1940                 case MFIQ_FREE:
 1941                 case MFIQ_BIO:
 1942                 case MFIQ_READY:
 1943                 case MFIQ_BUSY:
 1944                         bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
 1945                             sizeof(struct mfi_qstat));
 1946                         break;
 1947                 default:
 1948                         error = ENOIOCTL;
 1949                         break;
 1950                 }
 1951                 break;
 1952         case MFIIO_QUERY_DISK:
 1953         {
 1954                 struct mfi_query_disk *qd;
 1955                 struct mfi_disk *ld;
 1956 
 1957                 qd = (struct mfi_query_disk *)arg;
 1958                 mtx_lock(&sc->mfi_io_lock);
 1959                 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
 1960                         if (ld->ld_id == qd->array_id)
 1961                                 break;
 1962                 }
 1963                 if (ld == NULL) {
 1964                         qd->present = 0;
 1965                         mtx_unlock(&sc->mfi_io_lock);
 1966                         return (0);
 1967                 }
 1968                 qd->present = 1;
 1969                 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
 1970                         qd->open = 1;
 1971                 bzero(qd->devname, SPECNAMELEN + 1);
 1972                 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
 1973                 mtx_unlock(&sc->mfi_io_lock);
 1974                 break;
 1975         }
 1976         case MFI_CMD:
 1977 #ifdef __amd64__
 1978         case MFI_CMD32:
 1979 #endif
 1980                 {
 1981                 devclass_t devclass;
 1982                 ioc = (struct mfi_ioc_packet *)arg;
 1983                 int adapter;
 1984 
 1985                 adapter = ioc->mfi_adapter_no;
 1986                 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
 1987                         devclass = devclass_find("mfi");
 1988                         sc = devclass_get_softc(devclass, adapter);
 1989                 }
 1990                 mtx_lock(&sc->mfi_io_lock);
 1991                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 1992                         mtx_unlock(&sc->mfi_io_lock);
 1993                         return (EBUSY);
 1994                 }
 1995                 mtx_unlock(&sc->mfi_io_lock);
 1996                 locked = 0;
 1997 
 1998                 /*
 1999                  * save off original context since copying from user
 2000                  * will clobber some data
 2001                  */
 2002                 context = cm->cm_frame->header.context;
 2003 
 2004                 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
 2005                     2 * MFI_DCMD_FRAME_SIZE);  /* this isn't quite right */
 2006                 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
 2007                     * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
 2008                 if (ioc->mfi_sge_count) {
 2009                         cm->cm_sg =
 2010                             (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
 2011                 }
 2012                 cm->cm_flags = 0;
 2013                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
 2014                         cm->cm_flags |= MFI_CMD_DATAIN;
 2015                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
 2016                         cm->cm_flags |= MFI_CMD_DATAOUT;
 2017                 /* Legacy app shim */
 2018                 if (cm->cm_flags == 0)
 2019                         cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
 2020                 cm->cm_len = cm->cm_frame->header.data_len;
 2021                 if (cm->cm_len &&
 2022                     (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
 2023                         cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
 2024                             M_WAITOK | M_ZERO);
 2025                         if (cm->cm_data == NULL) {
 2026                                 device_printf(sc->mfi_dev, "Malloc failed\n");
 2027                                 goto out;
 2028                         }
 2029                 } else {
 2030                         cm->cm_data = 0;
 2031                 }
 2032 
 2033                 /* restore header context */
 2034                 cm->cm_frame->header.context = context;
 2035 
 2036                 temp = data;
 2037                 if (cm->cm_flags & MFI_CMD_DATAOUT) {
 2038                         for (i = 0; i < ioc->mfi_sge_count; i++) {
 2039 #ifdef __amd64__
 2040                                 if (cmd == MFI_CMD) {
 2041                                         /* Native */
 2042                                         error = copyin(ioc->mfi_sgl[i].iov_base,
 2043                                                temp,
 2044                                                ioc->mfi_sgl[i].iov_len);
 2045                                 } else {
 2046                                         void *temp_convert;
 2047                                         /* 32bit */
 2048                                         ioc32 = (struct mfi_ioc_packet32 *)ioc;
 2049                                         temp_convert =
 2050                                             PTRIN(ioc32->mfi_sgl[i].iov_base);
 2051                                         error = copyin(temp_convert,
 2052                                                temp,
 2053                                                ioc32->mfi_sgl[i].iov_len);
 2054                                 }
 2055 #else
 2056                                 error = copyin(ioc->mfi_sgl[i].iov_base,
 2057                                        temp,
 2058                                        ioc->mfi_sgl[i].iov_len);
 2059 #endif
 2060                                 if (error != 0) {
 2061                                         device_printf(sc->mfi_dev,
 2062                                             "Copy in failed\n");
 2063                                         goto out;
 2064                                 }
 2065                                 temp = &temp[ioc->mfi_sgl[i].iov_len];
 2066                         }
 2067                 }
 2068 
 2069                 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
 2070                         locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
 2071 
 2072                 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
 2073                         cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
 2074                         cm->cm_frame->pass.sense_addr_hi = 0;
 2075                 }
 2076 
 2077                 mtx_lock(&sc->mfi_io_lock);
 2078                 error = mfi_check_command_pre(sc, cm);
 2079                 if (error) {
 2080                         mtx_unlock(&sc->mfi_io_lock);
 2081                         goto out;
 2082                 }
 2083 
 2084                 if ((error = mfi_wait_command(sc, cm)) != 0) {
 2085                         device_printf(sc->mfi_dev,
 2086                             "Controller command failed\n");
 2087                         mtx_unlock(&sc->mfi_io_lock);
 2088                         goto out;
 2089                 }
 2090 
 2091                 mfi_check_command_post(sc, cm);
 2092                 mtx_unlock(&sc->mfi_io_lock);
 2093 
 2094                 temp = data;
 2095                 if (cm->cm_flags & MFI_CMD_DATAIN) {
 2096                         for (i = 0; i < ioc->mfi_sge_count; i++) {
 2097 #ifdef __amd64__
 2098                                 if (cmd == MFI_CMD) {
 2099                                         /* Native */
 2100                                         error = copyout(temp,
 2101                                                 ioc->mfi_sgl[i].iov_base,
 2102                                                 ioc->mfi_sgl[i].iov_len);
 2103                                 } else {
 2104                                         void *temp_convert;
 2105                                         /* 32bit */
 2106                                         ioc32 = (struct mfi_ioc_packet32 *)ioc;
 2107                                         temp_convert =
 2108                                             PTRIN(ioc32->mfi_sgl[i].iov_base);
 2109                                         error = copyout(temp,
 2110                                                 temp_convert,
 2111                                                 ioc32->mfi_sgl[i].iov_len);
 2112                                 }
 2113 #else
 2114                                 error = copyout(temp,
 2115                                         ioc->mfi_sgl[i].iov_base,
 2116                                         ioc->mfi_sgl[i].iov_len);
 2117 #endif
 2118                                 if (error != 0) {
 2119                                         device_printf(sc->mfi_dev,
 2120                                             "Copy out failed\n");
 2121                                         goto out;
 2122                                 }
 2123                                 temp = &temp[ioc->mfi_sgl[i].iov_len];
 2124                         }
 2125                 }
 2126 
 2127                 if (ioc->mfi_sense_len) {
 2128                         /* get user-space sense ptr then copy out sense */
 2129                         bcopy(&((struct mfi_ioc_packet*)arg)
 2130                             ->mfi_frame.raw[ioc->mfi_sense_off],
 2131                             &sense_ptr.sense_ptr_data[0],
 2132                             sizeof(sense_ptr.sense_ptr_data));
 2133 #ifdef __amd64__
 2134                         if (cmd != MFI_CMD) {
 2135                                 /* Not 64-bit native, so zero out
 2136                                  * any address over 32 bits.
 2137                                  */
 2138                                 sense_ptr.addr.high = 0;
 2139                         }
 2140 #endif
 2141                         error = copyout(cm->cm_sense, sense_ptr.user_space,
 2142                             ioc->mfi_sense_len);
 2143                         if (error != 0) {
 2144                                 device_printf(sc->mfi_dev,
 2145                                     "Copy out failed\n");
 2146                                 goto out;
 2147                         }
 2148                 }
 2149 
 2150                 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
 2151 out:
 2152                 mfi_config_unlock(sc, locked);
 2153                 if (data)
 2154                         free(data, M_MFIBUF);
 2155                 if (cm) {
 2156                         mtx_lock(&sc->mfi_io_lock);
 2157                         mfi_release_command(cm);
 2158                         mtx_unlock(&sc->mfi_io_lock);
 2159                 }
 2160 
 2161                 break;
 2162                 }
 2163         case MFI_SET_AEN:
 2164                 aen = (struct mfi_ioc_aen *)arg;
 2165                 error = mfi_aen_register(sc, aen->aen_seq_num,
 2166                     aen->aen_class_locale);
 2167 
 2168                 break;
 2169         case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
 2170                 {
 2171                         devclass_t devclass;
 2172                         struct mfi_linux_ioc_packet l_ioc;
 2173                         int adapter;
 2174 
 2175                         devclass = devclass_find("mfi");
 2176                         if (devclass == NULL)
 2177                                 return (ENOENT);
 2178 
 2179                         error = copyin(arg, &l_ioc, sizeof(l_ioc));
 2180                         if (error)
 2181                                 return (error);
 2182                         adapter = l_ioc.lioc_adapter_no;
 2183                         sc = devclass_get_softc(devclass, adapter);
 2184                         if (sc == NULL)
 2185                                 return (ENOENT);
 2186                         return (mfi_linux_ioctl_int(sc->mfi_cdev,
 2187                             cmd, arg, flag, td));
 2188                         break;
 2189                 }
 2190         case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
 2191                 {
 2192                         devclass_t devclass;
 2193                         struct mfi_linux_ioc_aen l_aen;
 2194                         int adapter;
 2195 
 2196                         devclass = devclass_find("mfi");
 2197                         if (devclass == NULL)
 2198                                 return (ENOENT);
 2199 
 2200                         error = copyin(arg, &l_aen, sizeof(l_aen));
 2201                         if (error)
 2202                                 return (error);
 2203                         adapter = l_aen.laen_adapter_no;
 2204                         sc = devclass_get_softc(devclass, adapter);
 2205                         if (sc == NULL)
 2206                                 return (ENOENT);
 2207                         return (mfi_linux_ioctl_int(sc->mfi_cdev,
 2208                             cmd, arg, flag, td));
 2209                         break;
 2210                 }
 2211 #ifdef __amd64__
 2212         case MFIIO_PASSTHRU32:
 2213                 iop_swab.ioc_frame      = iop32->ioc_frame;
 2214                 iop_swab.buf_size       = iop32->buf_size;
 2215                 iop_swab.buf            = PTRIN(iop32->buf);
 2216                 iop                     = &iop_swab;
 2217                 /* FALLTHROUGH */
 2218 #endif
 2219         case MFIIO_PASSTHRU:
 2220                 error = mfi_user_command(sc, iop);
 2221 #ifdef __amd64__
 2222                 if (cmd == MFIIO_PASSTHRU32)
 2223                         iop32->ioc_frame = iop_swab.ioc_frame;
 2224 #endif
 2225                 break;
 2226         }
 2227 
 2228         return (error);
 2229 }
 2230 
 2231 static int
 2232 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
 2233 {
 2234         struct mfi_softc *sc;
 2235         struct mfi_linux_ioc_packet l_ioc;
 2236         struct mfi_linux_ioc_aen l_aen;
 2237         struct mfi_command *cm = NULL;
 2238         struct mfi_aen *mfi_aen_entry;
 2239         union mfi_sense_ptr sense_ptr;
 2240         uint32_t context;
 2241         uint8_t *data = NULL, *temp;
 2242         int i;
 2243         int error, locked;
 2244 
 2245         sc = dev->si_drv1;
 2246         error = 0;
 2247         switch (cmd) {
 2248         case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
 2249                 error = copyin(arg, &l_ioc, sizeof(l_ioc));
 2250                 if (error != 0)
 2251                         return (error);
 2252 
 2253                 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
 2254                         return (EINVAL);
 2255                 }
 2256 
 2257                 mtx_lock(&sc->mfi_io_lock);
 2258                 if ((cm = mfi_dequeue_free(sc)) == NULL) {
 2259                         mtx_unlock(&sc->mfi_io_lock);
 2260                         return (EBUSY);
 2261                 }
 2262                 mtx_unlock(&sc->mfi_io_lock);
 2263                 locked = 0;
 2264 
 2265                 /*
 2266                  * save off original context since copying from user
 2267                  * will clobber some data
 2268                  */
 2269                 context = cm->cm_frame->header.context;
 2270 
 2271                 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
 2272                       2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
 2273                 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
 2274                       * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
 2275                 if (l_ioc.lioc_sge_count)
 2276                         cm->cm_sg =
 2277                             (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
 2278                 cm->cm_flags = 0;
 2279                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
 2280                         cm->cm_flags |= MFI_CMD_DATAIN;
 2281                 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
 2282                         cm->cm_flags |= MFI_CMD_DATAOUT;
 2283                 cm->cm_len = cm->cm_frame->header.data_len;
 2284                 if (cm->cm_len &&
 2285                       (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
 2286                         cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
 2287                             M_WAITOK | M_ZERO);
 2288                         if (cm->cm_data == NULL) {
 2289                                 device_printf(sc->mfi_dev, "Malloc failed\n");
 2290                                 goto out;
 2291                         }
 2292                 } else {
 2293                         cm->cm_data = 0;
 2294                 }
 2295 
 2296                 /* restore header context */
 2297                 cm->cm_frame->header.context = context;
 2298 
 2299                 temp = data;
 2300                 if (cm->cm_flags & MFI_CMD_DATAOUT) {
 2301                         for (i = 0; i < l_ioc.lioc_sge_count; i++) {
 2302                                 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
 2303                                        temp,
 2304                                        l_ioc.lioc_sgl[i].iov_len);
 2305                                 if (error != 0) {
 2306                                         device_printf(sc->mfi_dev,
 2307                                             "Copy in failed\n");
 2308                                         goto out;
 2309                                 }
 2310                                 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
 2311                         }
 2312                 }
 2313 
 2314                 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
 2315                         locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
 2316 
 2317                 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
 2318                         cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
 2319                         cm->cm_frame->pass.sense_addr_hi = 0;
 2320                 }
 2321 
 2322                 mtx_lock(&sc->mfi_io_lock);
 2323                 error = mfi_check_command_pre(sc, cm);
 2324                 if (error) {
 2325                         mtx_unlock(&sc->mfi_io_lock);
 2326                         goto out;
 2327                 }
 2328 
 2329                 if ((error = mfi_wait_command(sc, cm)) != 0) {
 2330                         device_printf(sc->mfi_dev,
 2331                             "Controller command failed\n");
 2332                         mtx_unlock(&sc->mfi_io_lock);
 2333                         goto out;
 2334                 }
 2335 
 2336                 mfi_check_command_post(sc, cm);
 2337                 mtx_unlock(&sc->mfi_io_lock);
 2338 
 2339                 temp = data;
 2340                 if (cm->cm_flags & MFI_CMD_DATAIN) {
 2341                         for (i = 0; i < l_ioc.lioc_sge_count; i++) {
 2342                                 error = copyout(temp,
 2343                                         PTRIN(l_ioc.lioc_sgl[i].iov_base),
 2344                                         l_ioc.lioc_sgl[i].iov_len);
 2345                                 if (error != 0) {
 2346                                         device_printf(sc->mfi_dev,
 2347                                             "Copy out failed\n");
 2348                                         goto out;
 2349                                 }
 2350                                 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
 2351                         }
 2352                 }
 2353 
 2354                 if (l_ioc.lioc_sense_len) {
 2355                         /* get user-space sense ptr then copy out sense */
 2356                         bcopy(&((struct mfi_linux_ioc_packet*)arg)
 2357                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
 2358                             &sense_ptr.sense_ptr_data[0],
 2359                             sizeof(sense_ptr.sense_ptr_data));
 2360 #ifdef __amd64__
 2361                         /*
 2362                          * only 32bit Linux support so zero out any
 2363                          * address over 32bit
 2364                          */
 2365                         sense_ptr.addr.high = 0;
 2366 #endif
 2367                         error = copyout(cm->cm_sense, sense_ptr.user_space,
 2368                             l_ioc.lioc_sense_len);
 2369                         if (error != 0) {
 2370                                 device_printf(sc->mfi_dev,
 2371                                     "Copy out failed\n");
 2372                                 goto out;
 2373                         }
 2374                 }
 2375 
 2376                 error = copyout(&cm->cm_frame->header.cmd_status,
 2377                         &((struct mfi_linux_ioc_packet*)arg)
 2378                         ->lioc_frame.hdr.cmd_status,
 2379                         1);
 2380                 if (error != 0) {
 2381                         device_printf(sc->mfi_dev,
 2382                                       "Copy out failed\n");
 2383                         goto out;
 2384                 }
 2385 
 2386 out:
 2387                 mfi_config_unlock(sc, locked);
 2388                 if (data)
 2389                         free(data, M_MFIBUF);
 2390                 if (cm) {
 2391                         mtx_lock(&sc->mfi_io_lock);
 2392                         mfi_release_command(cm);
 2393                         mtx_unlock(&sc->mfi_io_lock);
 2394                 }
 2395 
 2396                 return (error);
 2397         case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
 2398                 error = copyin(arg, &l_aen, sizeof(l_aen));
 2399                 if (error != 0)
 2400                         return (error);
 2401                 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
 2402                 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
 2403                     M_WAITOK);
 2404                 mtx_lock(&sc->mfi_io_lock);
 2405                 if (mfi_aen_entry != NULL) {
 2406                         mfi_aen_entry->p = curproc;
 2407                         TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
 2408                             aen_link);
 2409                 }
 2410                 error = mfi_aen_register(sc, l_aen.laen_seq_num,
 2411                     l_aen.laen_class_locale);
 2412 
 2413                 if (error != 0) {
 2414                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 2415                             aen_link);
 2416                         free(mfi_aen_entry, M_MFIBUF);
 2417                 }
 2418                 mtx_unlock(&sc->mfi_io_lock);
 2419 
 2420                 return (error);
 2421         default:
 2422                 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
 2423                 error = ENOENT;
 2424                 break;
 2425         }
 2426 
 2427         return (error);
 2428 }
 2429 
 2430 static int
 2431 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
 2432 {
 2433         struct mfi_softc *sc;
 2434         int revents = 0;
 2435 
 2436         sc = dev->si_drv1;
 2437 
 2438         if (poll_events & (POLLIN | POLLRDNORM)) {
 2439                 if (sc->mfi_aen_triggered != 0) {
 2440                         revents |= poll_events & (POLLIN | POLLRDNORM);
 2441                         sc->mfi_aen_triggered = 0;
 2442                 }
 2443                 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
 2444                         revents |= POLLERR;
 2445                 }
 2446         }
 2447 
 2448         if (revents == 0) {
 2449                 if (poll_events & (POLLIN | POLLRDNORM)) {
 2450                         sc->mfi_poll_waiting = 1;
 2451                         selrecord(td, &sc->mfi_select);
 2452                 }
 2453         }
 2454 
 2455         return revents;
 2456 }
 2457 
 2458 
 2459 static void
 2460 mfi_dump_all(void)
 2461 {
 2462         struct mfi_softc *sc;
 2463         struct mfi_command *cm;
 2464         devclass_t dc;
 2465         time_t deadline;
 2466         int timedout;
 2467         int i;
 2468 
 2469         dc = devclass_find("mfi");
 2470         if (dc == NULL) {
 2471                 printf("No mfi dev class\n");
 2472                 return;
 2473         }
 2474 
 2475         for (i = 0; ; i++) {
 2476                 sc = devclass_get_softc(dc, i);
 2477                 if (sc == NULL)
 2478                         break;
 2479                 device_printf(sc->mfi_dev, "Dumping\n\n");
 2480                 timedout = 0;
 2481                 deadline = time_uptime - MFI_CMD_TIMEOUT;
 2482                 mtx_lock(&sc->mfi_io_lock);
 2483                 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 2484                         if (cm->cm_timestamp < deadline) {
 2485                                 device_printf(sc->mfi_dev,
 2486                                     "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
 2487                                     (int)(time_uptime - cm->cm_timestamp));
 2488                                 MFI_PRINT_CMD(cm);
 2489                                 timedout++;
 2490                         }
 2491                 }
 2492 
 2493 #if 0
 2494                 if (timedout)
 2495                         MFI_DUMP_CMDS(SC);
 2496 #endif
 2497 
 2498                 mtx_unlock(&sc->mfi_io_lock);
 2499         }
 2500 
 2501         return;
 2502 }
 2503 
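      /*
       * Watchdog callout: every MFI_CMD_TIMEOUT seconds, scan the busy queue
       * and report commands that have been outstanding longer than the
       * timeout.  The async event notification command is skipped since it
       * is expected to stay outstanding indefinitely.
       */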
 2504 static void
 2505 mfi_timeout(void *data)
 2506 {
 2507         struct mfi_softc *sc = (struct mfi_softc *)data;
 2508         struct mfi_command *cm;
 2509         time_t deadline;
 2510         int timedout = 0;
 2511 
 2512         deadline = time_uptime - MFI_CMD_TIMEOUT;
 2513         mtx_lock(&sc->mfi_io_lock);
 2514         TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 2515                 if (sc->mfi_aen_cm == cm)
 2516                         continue;
 2517                 if (cm->cm_timestamp < deadline) {
 2518                         device_printf(sc->mfi_dev,
 2519                             "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
 2520                             (int)(time_uptime - cm->cm_timestamp));
 2521                         MFI_PRINT_CMD(cm);
 2522                         MFI_VALIDATE_CMD(sc, cm);
 2523                         timedout++;
 2524                 }
 2525         }
 2526 
 2527 #if 0
 2528         if (timedout)
 2529                 MFI_DUMP_CMDS(SC);
 2530 #endif
 2531 
 2532         mtx_unlock(&sc->mfi_io_lock);
 2533 
 2534         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
 2535             mfi_timeout, sc);
 2536 
 2537         if (0)
 2538                 mfi_dump_all();
 2539         return;
 2540 }
