The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/mfi/mfi.c

Version: -  FREEBSD  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-2  -  FREEBSD-11-1  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-4  -  FREEBSD-10-3  -  FREEBSD-10-2  -  FREEBSD-10-1  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-3  -  FREEBSD-9-2  -  FREEBSD-9-1  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-4  -  FREEBSD-8-3  -  FREEBSD-8-2  -  FREEBSD-8-1  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-4  -  FREEBSD-7-3  -  FREEBSD-7-2  -  FREEBSD-7-1  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-4  -  FREEBSD-6-3  -  FREEBSD-6-2  -  FREEBSD-6-1  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-5  -  FREEBSD-5-4  -  FREEBSD-5-3  -  FREEBSD-5-2  -  FREEBSD-5-1  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2006 IronPort Systems
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 /*-
   27  * Copyright (c) 2007 LSI Corp.
   28  * Copyright (c) 2007 Rajesh Prabhakaran.
   29  * All rights reserved.
   30  *
   31  * Redistribution and use in source and binary forms, with or without
   32  * modification, are permitted provided that the following conditions
   33  * are met:
   34  * 1. Redistributions of source code must retain the above copyright
   35  *    notice, this list of conditions and the following disclaimer.
   36  * 2. Redistributions in binary form must reproduce the above copyright
   37  *    notice, this list of conditions and the following disclaimer in the
   38  *    documentation and/or other materials provided with the distribution.
   39  *
   40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   50  * SUCH DAMAGE.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD$");
   55 
   56 #include "opt_mfi.h"
   57 
   58 #include <sys/param.h>
   59 #include <sys/systm.h>
   60 #include <sys/sysctl.h>
   61 #include <sys/malloc.h>
   62 #include <sys/kernel.h>
   63 #include <sys/poll.h>
   64 #include <sys/selinfo.h>
   65 #include <sys/bus.h>
   66 #include <sys/conf.h>
   67 #include <sys/eventhandler.h>
   68 #include <sys/rman.h>
   69 #include <sys/bus_dma.h>
   70 #include <sys/bio.h>
   71 #include <sys/ioccom.h>
   72 #include <sys/uio.h>
   73 #include <sys/proc.h>
   74 #include <sys/signalvar.h>
   75 
   76 #include <machine/bus.h>
   77 #include <machine/resource.h>
   78 
   79 #include <dev/mfi/mfireg.h>
   80 #include <dev/mfi/mfi_ioctl.h>
   81 #include <dev/mfi/mfivar.h>
   82 
/*
 * Forward declarations for the driver's internal helpers: command pool
 * management and submission, asynchronous event notification (AEN)
 * registration/completion, logical disk (LD) probing, ioctl plumbing,
 * and the per-chipset register access methods (xscale vs. ppc-style
 * message units; the active set is chosen in mfi_attach()).
 */
static int      mfi_alloc_commands(struct mfi_softc *);
static int      mfi_comms_init(struct mfi_softc *);
static int      mfi_wait_command(struct mfi_softc *, struct mfi_command *);
static int      mfi_get_controller_info(struct mfi_softc *);
static int      mfi_get_log_state(struct mfi_softc *,
                    struct mfi_evt_log_state **);
static int      mfi_parse_entries(struct mfi_softc *, int, int);
static int      mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
                    uint32_t, void **, size_t);
static void     mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void     mfi_startup(void *arg);
static void     mfi_intr(void *arg);
static void     mfi_ldprobe(struct mfi_softc *sc);
static int      mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void     mfi_aen_complete(struct mfi_command *);
static int      mfi_aen_setup(struct mfi_softc *, uint32_t);
static int      mfi_add_ld(struct mfi_softc *sc, int);
static void     mfi_add_ld_complete(struct mfi_command *);
static struct mfi_command * mfi_bio_command(struct mfi_softc *);
static void     mfi_bio_complete(struct mfi_command *);
static int      mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
static int      mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static void     mfi_complete(struct mfi_softc *, struct mfi_command *);
static int      mfi_abort(struct mfi_softc *, struct mfi_command *);
static int      mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
static void     mfi_timeout(void *);
static int      mfi_user_command(struct mfi_softc *,
                    struct mfi_ioc_passthru *);
static void     mfi_enable_intr_xscale(struct mfi_softc *sc);
static void     mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t  mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t  mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int      mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int      mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void     mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
static void     mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
/*
 * Loader tunables / sysctls under hw.mfi:
 *   event_locale - event message locales subscribed to via AEN
 *                  (default: all locales)
 *   event_class  - minimum event class (severity) reported
 *                  (default: informational and above)
 *   max_cmds     - cap on the driver command pool, further limited by the
 *                  firmware's own max (see mfi_alloc_commands); read-only
 *                  at runtime, settable only as a loader tunable
 */
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int      mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
            0, "event message locale");

static int      mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
          0, "event message class");

static int      mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
           0, "Max commands");
  135 
/*
 * Management interface: a character device (/dev/mfi%d, created in
 * mfi_attach()) through which userland RAID management tools issue
 * ioctls and poll for asynchronous event notifications.
 */
static d_open_t         mfi_open;
static d_close_t        mfi_close;
static d_ioctl_t        mfi_ioctl;
static d_poll_t         mfi_poll;

static struct cdevsw mfi_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       mfi_open,
        .d_close =      mfi_close,
        .d_ioctl =      mfi_ioctl,
        .d_poll =       mfi_poll,
        .d_name =       "mfi",
};

/* Malloc type for all driver-private allocations (commands, AEN bufs). */
MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

/* NOTE(review): presumably sizes SCSI INQUIRY data during LD probing;
 * the use site is not in this chunk -- confirm against mfi_ldprobe(). */
#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
  155 
/*
 * Enable firmware->host interrupts on xscale (1064R-class) controllers
 * by writing the outbound interrupt mask register.
 * NOTE(review): the 0x01 mask value comes from the xscale message unit
 * layout -- confirm against the MFI_OMSK definition in mfireg.h.
 */
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
        MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
  161 
/*
 * Enable firmware->host interrupts on ppc-style controllers: ack any
 * pending doorbell state first, then unmask only the doorbell bit for
 * the specific chip generation (1078 vs. GEN2).  Controllers with
 * neither flag set get the doorbell clear but no unmask write.
 */
static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
        MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
        if (sc->mfi_flags & MFI_FLAGS_1078) {
                MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
        } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
                MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
        }
}
  172 
/*
 * Read the raw firmware status word (state + capability fields) from
 * the xscale outbound message register.  Callers mask with
 * MFI_FWSTATE_* constants to extract individual fields.
 */
static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
        return MFI_READ4(sc, MFI_OMSG0);
}
  178 
/*
 * Read the raw firmware status word from the ppc-style outbound
 * scratchpad register; same contract as the xscale variant.
 */
static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
        return MFI_READ4(sc, MFI_OSP0);
}
  184 
/*
 * Check whether a pending interrupt belongs to this xscale controller
 * and, if so, acknowledge it by writing the status back.
 * Returns 0 when the interrupt was ours (and is now cleared), 1 when it
 * was not valid for us (shared-IRQ "not mine" indication).
 */
static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
        int32_t status;

        status = MFI_READ4(sc, MFI_OSTS);
        if ((status & MFI_OSTS_INTR_VALID) == 0)
                return 1;

        /* Write-1-to-clear acknowledge of the bits we observed. */
        MFI_WRITE4(sc, MFI_OSTS, status);
        return 0;
}
  197 
/*
 * ppc-style counterpart of mfi_check_clear_intr_xscale(): test the
 * generation-specific reply-message bit and acknowledge via the
 * doorbell clear register.  Returns 0 if the interrupt was ours and is
 * now acked, 1 if not ours.
 * NOTE(review): a controller with neither the 1078 nor the GEN2 flag
 * falls through and always claims/acks the interrupt -- presumably no
 * such ppc-path chip exists; confirm against the attach-time flags.
 */
static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
        int32_t status;

        status = MFI_READ4(sc, MFI_OSTS);
        if (sc->mfi_flags & MFI_FLAGS_1078) {
                if (!(status & MFI_1078_RM)) {
                        return 1;
                }
        } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
                if (!(status & MFI_GEN2_RM)) {
                        return 1;
                }
        }

        /* Acknowledge by clearing the observed doorbell bits. */
        MFI_WRITE4(sc, MFI_ODCR0, status);
        return 0;
}
  217 
/*
 * Post a command to the xscale inbound queue port.  The frame bus
 * address (64-byte aligned) is shifted right 3 so the low bits can
 * carry the extra-frame count, per the xscale queue port encoding.
 */
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
{
        MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
}
  223 
/*
 * Post a command to the ppc-style inbound queue port.  Here the frame
 * count occupies bits above the always-set low bit, with the (aligned)
 * bus address OR'd in directly -- a different encoding than xscale.
 */
static void
mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
{
        MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 );
}
  229 
/*
 * Drive the controller firmware toward the READY state, issuing the
 * doorbell writes that some intermediate states require and polling
 * (100ms granularity) with a per-state timeout.
 *
 * Returns 0 once the firmware reports READY; ENXIO if the firmware has
 * faulted, reports an unknown state, or fails to leave its current
 * state within the allotted time.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
        int32_t fw_state, cur_state;
        int max_wait, i;        /* max_wait is in seconds (polled 10x/sec) */

        fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK;
        while (fw_state != MFI_FWSTATE_READY) {
                if (bootverbose)
                        device_printf(sc->mfi_dev, "Waiting for firmware to "
                        "become ready\n");
                cur_state = fw_state;
                switch (fw_state) {
                case MFI_FWSTATE_FAULT:
                        /* Unrecoverable: firmware has crashed. */
                        device_printf(sc->mfi_dev, "Firmware fault\n");
                        return (ENXIO);
                case MFI_FWSTATE_WAIT_HANDSHAKE:
                        /* Firmware wants the host to ack the handshake. */
                        MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
                        max_wait = 2;
                        break;
                case MFI_FWSTATE_OPERATIONAL:
                        /* Ask an already-running firmware to go to READY. */
                        MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
                        max_wait = 10;
                        break;
                case MFI_FWSTATE_UNDEFINED:
                case MFI_FWSTATE_BB_INIT:
                        max_wait = 2;
                        break;
                case MFI_FWSTATE_FW_INIT:
                case MFI_FWSTATE_DEVICE_SCAN:
                case MFI_FWSTATE_FLUSH_CACHE:
                        /* Long-running init states get a generous timeout. */
                        max_wait = 20;
                        break;
                default:
                        device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
                            fw_state);
                        return (ENXIO);
                }
                /* Poll every 100ms until the state changes or we time out. */
                for (i = 0; i < (max_wait * 10); i++) {
                        fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
                        if (fw_state == cur_state)
                                DELAY(100000);
                        else
                                break;
                }
                if (fw_state == cur_state) {
                        device_printf(sc->mfi_dev, "firmware stuck in state "
                            "%#x\n", fw_state);
                        return (ENXIO);
                }
        }
        return (0);
}
  283 
/*
 * busdma load callback that captures a single segment's bus address
 * into a 32-bit slot.  The maps loaded with this callback are created
 * from tags restricted to BUS_SPACE_MAXADDR_32BIT, so truncating
 * ds_addr to 32 bits is safe here.
 * NOTE(review): the 'error' argument is ignored; callers rely on the
 * synchronous bus_dmamap_load() return -- confirm that is intended.
 */
static void
mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        uint32_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}
  292 
  293 int
  294 mfi_attach(struct mfi_softc *sc)
  295 {
  296         uint32_t status;
  297         int error, commsz, framessz, sensesz;
  298         int frames, unit, max_fw_sge;
  299 
  300         device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n");
  301 
  302         mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
  303         sx_init(&sc->mfi_config_lock, "MFI config");
  304         TAILQ_INIT(&sc->mfi_ld_tqh);
  305         TAILQ_INIT(&sc->mfi_aen_pids);
  306         TAILQ_INIT(&sc->mfi_cam_ccbq);
  307 
  308         mfi_initq_free(sc);
  309         mfi_initq_ready(sc);
  310         mfi_initq_busy(sc);
  311         mfi_initq_bio(sc);
  312 
  313         if (sc->mfi_flags & MFI_FLAGS_1064R) {
  314                 sc->mfi_enable_intr = mfi_enable_intr_xscale;
  315                 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
  316                 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
  317                 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
  318         }
  319         else {
  320                 sc->mfi_enable_intr =  mfi_enable_intr_ppc;
  321                 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
  322                 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
  323                 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
  324         }
  325 
  326 
  327         /* Before we get too far, see if the firmware is working */
  328         if ((error = mfi_transition_firmware(sc)) != 0) {
  329                 device_printf(sc->mfi_dev, "Firmware not in READY state, "
  330                     "error %d\n", error);
  331                 return (ENXIO);
  332         }
  333 
  334         /*
  335          * Get information needed for sizing the contiguous memory for the
  336          * frame pool.  Size down the sgl parameter since we know that
  337          * we will never need more than what's required for MAXPHYS.
  338          * It would be nice if these constants were available at runtime
  339          * instead of compile time.
  340          */
  341         status = sc->mfi_read_fw_status(sc);
  342         sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
  343         max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
  344         sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
  345 
  346         /*
  347          * Create the dma tag for data buffers.  Used both for block I/O
  348          * and for various internal data queries.
  349          */
  350         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  351                                 1, 0,                   /* algnmnt, boundary */
  352                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  353                                 BUS_SPACE_MAXADDR,      /* highaddr */
  354                                 NULL, NULL,             /* filter, filterarg */
  355                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  356                                 sc->mfi_max_sge,        /* nsegments */
  357                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  358                                 BUS_DMA_ALLOCNOW,       /* flags */
  359                                 busdma_lock_mutex,      /* lockfunc */
  360                                 &sc->mfi_io_lock,       /* lockfuncarg */
  361                                 &sc->mfi_buffer_dmat)) {
  362                 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
  363                 return (ENOMEM);
  364         }
  365 
  366         /*
  367          * Allocate DMA memory for the comms queues.  Keep it under 4GB for
  368          * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
  369          * entry, so the calculated size here will be will be 1 more than
  370          * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
  371          */
  372         commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
  373             sizeof(struct mfi_hwcomms);
  374         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  375                                 1, 0,                   /* algnmnt, boundary */
  376                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  377                                 BUS_SPACE_MAXADDR,      /* highaddr */
  378                                 NULL, NULL,             /* filter, filterarg */
  379                                 commsz,                 /* maxsize */
  380                                 1,                      /* msegments */
  381                                 commsz,                 /* maxsegsize */
  382                                 0,                      /* flags */
  383                                 NULL, NULL,             /* lockfunc, lockarg */
  384                                 &sc->mfi_comms_dmat)) {
  385                 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
  386                 return (ENOMEM);
  387         }
  388         if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
  389             BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
  390                 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
  391                 return (ENOMEM);
  392         }
  393         bzero(sc->mfi_comms, commsz);
  394         bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
  395             sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
  396 
  397         /*
  398          * Allocate DMA memory for the command frames.  Keep them in the
  399          * lower 4GB for efficiency.  Calculate the size of the commands at
  400          * the same time; each command is one 64 byte frame plus a set of
  401          * additional frames for holding sg lists or other data.
  402          * The assumption here is that the SG list will start at the second
  403          * frame and not use the unused bytes in the first frame.  While this
  404          * isn't technically correct, it simplifies the calculation and allows
  405          * for command frames that might be larger than an mfi_io_frame.
  406          */
  407         if (sizeof(bus_addr_t) == 8) {
  408                 sc->mfi_sge_size = sizeof(struct mfi_sg64);
  409                 sc->mfi_flags |= MFI_FLAGS_SG64;
  410         } else {
  411                 sc->mfi_sge_size = sizeof(struct mfi_sg32);
  412         }
  413         frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
  414         sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
  415         framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
  416         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  417                                 64, 0,                  /* algnmnt, boundary */
  418                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  419                                 BUS_SPACE_MAXADDR,      /* highaddr */
  420                                 NULL, NULL,             /* filter, filterarg */
  421                                 framessz,               /* maxsize */
  422                                 1,                      /* nsegments */
  423                                 framessz,               /* maxsegsize */
  424                                 0,                      /* flags */
  425                                 NULL, NULL,             /* lockfunc, lockarg */
  426                                 &sc->mfi_frames_dmat)) {
  427                 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
  428                 return (ENOMEM);
  429         }
  430         if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
  431             BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
  432                 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
  433                 return (ENOMEM);
  434         }
  435         bzero(sc->mfi_frames, framessz);
  436         bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
  437             sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
  438 
  439         /*
  440          * Allocate DMA memory for the frame sense data.  Keep them in the
  441          * lower 4GB for efficiency
  442          */
  443         sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
  444         if (bus_dma_tag_create( sc->mfi_parent_dmat,    /* parent */
  445                                 4, 0,                   /* algnmnt, boundary */
  446                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  447                                 BUS_SPACE_MAXADDR,      /* highaddr */
  448                                 NULL, NULL,             /* filter, filterarg */
  449                                 sensesz,                /* maxsize */
  450                                 1,                      /* nsegments */
  451                                 sensesz,                /* maxsegsize */
  452                                 0,                      /* flags */
  453                                 NULL, NULL,             /* lockfunc, lockarg */
  454                                 &sc->mfi_sense_dmat)) {
  455                 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
  456                 return (ENOMEM);
  457         }
  458         if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
  459             BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
  460                 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
  461                 return (ENOMEM);
  462         }
  463         bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
  464             sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
  465 
  466         if ((error = mfi_alloc_commands(sc)) != 0)
  467                 return (error);
  468 
  469         if ((error = mfi_comms_init(sc)) != 0)
  470                 return (error);
  471 
  472         if ((error = mfi_get_controller_info(sc)) != 0)
  473                 return (error);
  474 
  475         mtx_lock(&sc->mfi_io_lock);
  476         if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
  477                 mtx_unlock(&sc->mfi_io_lock);
  478                 return (error);
  479         }
  480         mtx_unlock(&sc->mfi_io_lock);
  481 
  482         /*
  483          * Set up the interrupt handler.  XXX This should happen in
  484          * mfi_pci.c
  485          */
  486         sc->mfi_irq_rid = 0;
  487         if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
  488             &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  489                 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
  490                 return (EINVAL);
  491         }
  492         if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
  493             NULL, mfi_intr, sc, &sc->mfi_intr)) {
  494                 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
  495                 return (EINVAL);
  496         }
  497 
  498         /* Register a config hook to probe the bus for arrays */
  499         sc->mfi_ich.ich_func = mfi_startup;
  500         sc->mfi_ich.ich_arg = sc;
  501         if (config_intrhook_establish(&sc->mfi_ich) != 0) {
  502                 device_printf(sc->mfi_dev, "Cannot establish configuration "
  503                     "hook\n");
  504                 return (EINVAL);
  505         }
  506 
  507         /*
  508          * Register a shutdown handler.
  509          */
  510         if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
  511             sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
  512                 device_printf(sc->mfi_dev, "Warning: shutdown event "
  513                     "registration failed\n");
  514         }
  515 
  516         /*
  517          * Create the control device for doing management
  518          */
  519         unit = device_get_unit(sc->mfi_dev);
  520         sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
  521             0640, "mfi%d", unit);
  522         if (unit == 0)
  523                 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
  524         if (sc->mfi_cdev != NULL)
  525                 sc->mfi_cdev->si_drv1 = sc;
  526         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  527             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  528             OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
  529             &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
  530         SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
  531             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
  532             OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
  533             &sc->mfi_keep_deleted_volumes, 0,
  534             "Don't detach the mfid device for a busy volume that is deleted");
  535 
  536         device_add_child(sc->mfi_dev, "mfip", -1);
  537         bus_generic_attach(sc->mfi_dev);
  538 
  539         /* Start the timeout watchdog */
  540         callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
  541         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
  542             mfi_timeout, sc);
  543 
  544         return (0);
  545 }
  546 
/*
 * Build the driver's command pool: one mfi_command per slot, each bound
 * to its pre-allocated frame and sense buffer regions (carved out of
 * the contiguous DMA areas allocated in mfi_attach()).
 *
 * The pool size is the lesser of the hw.mfi.max_cmds tunable and the
 * firmware's advertised maximum.  Failure to create a per-command
 * dmamap stops the loop early but is tolerated: the driver simply runs
 * with fewer commands (sc->mfi_total_cmds), so this always returns 0.
 */
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        int i, ncmds;

        /*
         * XXX Should we allocate all the commands up front, or allocate on
         * demand later like 'aac' does?
         */
        ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
        if (bootverbose)
                device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
                   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

        /* M_WAITOK: this allocation sleeps rather than fails. */
        sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
            M_WAITOK | M_ZERO);

        for (i = 0; i < ncmds; i++) {
                cm = &sc->mfi_commands[i];
                /* Slot i owns bytes [i*cmd_size, (i+1)*cmd_size) of the
                 * frame area, both virtually and at its bus address. */
                cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
                    sc->mfi_cmd_size * i);
                cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
                    sc->mfi_cmd_size * i;
                /* The context survives mfi_release_command() and lets the
                 * completion path map a reply back to this slot. */
                cm->cm_frame->header.context = i;
                cm->cm_sense = &sc->mfi_sense[i];
                cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
                cm->cm_sc = sc;
                cm->cm_index = i;
                if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
                    &cm->cm_dmamap) == 0)
                        mfi_release_command(cm);        /* onto the free queue */
                else
                        break;
                sc->mfi_total_cmds++;
        }

        return (0);
}
  586 
/*
 * Return a command to the free queue after scrubbing the reusable
 * parts of its frame.  The frame is cleared as 32-bit words at fixed
 * offsets; words 2-3 are deliberately skipped because they hold the
 * context value assigned at pool-build time, which must survive reuse.
 */
void
mfi_release_command(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        uint32_t *hdr_data;

        /*
         * Zero out the important fields of the frame, but make sure the
         * context field is preserved.  For efficiency, handle the fields
         * as 32 bit words.  Clear out the first S/G entry too for safety.
         */
        hdr = &cm->cm_frame->header;
        if (cm->cm_data != NULL && hdr->sg_count) {
                /* Clearing sg32 also covers the low half of an sg64 entry. */
                cm->cm_sg->sg32[0].len = 0;
                cm->cm_sg->sg32[0].addr = 0;
        }

        /* Word offsets below mirror the mfi_frame_header layout. */
        hdr_data = (uint32_t *)cm->cm_frame;
        hdr_data[0] = 0;        /* cmd, sense_len, cmd_status, scsi_status */
        hdr_data[1] = 0;        /* target_id, lun_id, cdb_len, sg_count */
        hdr_data[4] = 0;        /* flags, timeout */
        hdr_data[5] = 0;        /* data_len */

        /* Reset the per-submission bookkeeping. */
        cm->cm_extra_frames = 0;
        cm->cm_flags = 0;
        cm->cm_complete = NULL;
        cm->cm_private = NULL;
        cm->cm_data = NULL;
        cm->cm_sg = 0;
        cm->cm_total_frame_size = 0;

        mfi_enqueue_free(cm);
}
  620 
  621 static int
  622 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
  623     void **bufp, size_t bufsize)
  624 {
  625         struct mfi_command *cm;
  626         struct mfi_dcmd_frame *dcmd;
  627         void *buf = NULL;
  628         
  629         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
  630         
  631         cm = mfi_dequeue_free(sc);
  632         if (cm == NULL)
  633                 return (EBUSY);
  634 
  635         if ((bufsize > 0) && (bufp != NULL)) {
  636                 if (*bufp == NULL) {
  637                         buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
  638                         if (buf == NULL) {
  639                                 mfi_release_command(cm);
  640                                 return (ENOMEM);
  641                         }
  642                         *bufp = buf;
  643                 } else {
  644                         buf = *bufp;
  645                 }
  646         }
  647 
  648         dcmd =  &cm->cm_frame->dcmd;
  649         bzero(dcmd->mbox, MFI_MBOX_SIZE);
  650         dcmd->header.cmd = MFI_CMD_DCMD;
  651         dcmd->header.timeout = 0;
  652         dcmd->header.flags = 0;
  653         dcmd->header.data_len = bufsize;
  654         dcmd->opcode = opcode;
  655         cm->cm_sg = &dcmd->sgl;
  656         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
  657         cm->cm_flags = 0;
  658         cm->cm_data = buf;
  659         cm->cm_private = buf;
  660         cm->cm_len = bufsize;
  661 
  662         *cmp = cm;
  663         if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
  664                 *bufp = buf;
  665         return (0);
  666 }
  667 
  668 static int
  669 mfi_comms_init(struct mfi_softc *sc)
  670 {
  671         struct mfi_command *cm;
  672         struct mfi_init_frame *init;
  673         struct mfi_init_qinfo *qinfo;
  674         int error;
  675 
  676         mtx_lock(&sc->mfi_io_lock);
  677         if ((cm = mfi_dequeue_free(sc)) == NULL)
  678                 return (EBUSY);
  679 
  680         /*
  681          * Abuse the SG list area of the frame to hold the init_qinfo
  682          * object;
  683          */
  684         init = &cm->cm_frame->init;
  685         qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
  686 
  687         bzero(qinfo, sizeof(struct mfi_init_qinfo));
  688         qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
  689         qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
  690             offsetof(struct mfi_hwcomms, hw_reply_q);
  691         qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
  692             offsetof(struct mfi_hwcomms, hw_pi);
  693         qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
  694             offsetof(struct mfi_hwcomms, hw_ci);
  695 
  696         init->header.cmd = MFI_CMD_INIT;
  697         init->header.data_len = sizeof(struct mfi_init_qinfo);
  698         init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
  699         cm->cm_data = NULL;
  700         cm->cm_flags = MFI_CMD_POLLED;
  701 
  702         if ((error = mfi_mapcmd(sc, cm)) != 0) {
  703                 device_printf(sc->mfi_dev, "failed to send init command\n");
  704                 mtx_unlock(&sc->mfi_io_lock);
  705                 return (error);
  706         }
  707         mfi_release_command(cm);
  708         mtx_unlock(&sc->mfi_io_lock);
  709 
  710         return (0);
  711 }
  712 
/*
 * Query the controller for its capabilities via a polled
 * MFI_DCMD_CTRL_GETINFO and derive sc->mfi_max_io (maximum I/O size in
 * sectors) from the response.  If the query itself fails, a
 * conservative maximum is computed from the SG-list limit instead and
 * the failure is not propagated.  Returns 0 in that case, non-zero only
 * when the command could not even be allocated.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
        struct mfi_command *cm = NULL;
        struct mfi_ctrl_info *ci = NULL;
        uint32_t max_sectors_1, max_sectors_2;
        int error;

        mtx_lock(&sc->mfi_io_lock);
        /* mfi_dcmd_command() allocates ci for us; freed at "out". */
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
            (void **)&ci, sizeof(*ci));
        if (error)
                goto out;
        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to get controller info\n");
                /* Fall back to a limit implied by the SG list size. */
                sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
                    MFI_SECTOR_LEN;
                error = 0;
                goto out;
        }

        /* Make the DMA'd response visible to the CPU before reading it. */
        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

        /* Take the smaller of the stripe-based and absolute request limits. */
        max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
        max_sectors_2 = ci->max_request_size;
        sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

out:
        if (ci)
                free(ci, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);
        mtx_unlock(&sc->mfi_io_lock);
        return (error);
}
  752 
/*
 * Fetch the firmware event-log state (sequence-number bookkeeping) via
 * a polled MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points
 * at a buffer allocated by mfi_dcmd_command(); the caller owns it and
 * must free it even on some error paths (see mfi_aen_setup()).
 * mfi_dcmd_command() asserts that the io lock is held.
 */
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
        struct mfi_command *cm = NULL;
        int error;

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
            (void **)log_state, sizeof(**log_state));
        if (error)
                goto out;
        cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to get log state\n");
                goto out;
        }

        /* Sync the DMA buffer before the caller reads *log_state. */
        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
        if (cm)
                mfi_release_command(cm);

        return (error);
}
  780 
/*
 * (Re)arm asynchronous event notification.  When seq_start is 0 the
 * firmware log state is queried and any events recorded between the
 * last shutdown and now are replayed through mfi_parse_entries();
 * otherwise registration starts at the caller-provided sequence number
 * (the path used when re-arming from mfi_aen_complete()).
 */
static int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
        struct mfi_evt_log_state *log_state = NULL;
        union mfi_evt class_locale;
        int error = 0;
        uint32_t seq;

        class_locale.members.reserved = 0;
        class_locale.members.locale = mfi_event_locale;
        class_locale.members.class  = mfi_event_class;

        if (seq_start == 0) {
                error = mfi_get_log_state(sc, &log_state);
                if (error) {
                        if (log_state)
                                free(log_state, M_MFIBUF);
                        return (error);
                }

                /*
                 * Walk through any events that fired since the last
                 * shutdown.
                 */
                mfi_parse_entries(sc, log_state->shutdown_seq_num,
                    log_state->newest_seq_num);
                seq = log_state->newest_seq_num;
        } else
                seq = seq_start;
        mfi_aen_register(sc, seq, class_locale.word);
        /*
         * NOTE(review): when seq_start != 0, log_state is still NULL
         * here; this relies on free(9) treating NULL as a no-op.
         */
        free(log_state, M_MFIBUF);

        return 0;
}
  815 
/*
 * Queue a command and sleep until it completes, returning its final
 * cm_error.  The completion side wakes us via wakeup(cm) once
 * MFI_CMD_COMPLETED is set.  Must be called with the io lock held;
 * msleep() drops and reacquires it while waiting.
 */
static int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

        mtx_assert(&sc->mfi_io_lock, MA_OWNED);
        cm->cm_complete = NULL;


        /*
         * MegaCli can issue a DCMD of 0.  In this case do nothing
         * and return 0 to it as status
         */
        if (cm->cm_frame->dcmd.opcode == 0) {
                cm->cm_frame->header.cmd_status = MFI_STAT_OK;
                cm->cm_error = 0;
                return (cm->cm_error);
        }
        mfi_enqueue_ready(cm);
        mfi_startio(sc);
        /* Sleep only if the command didn't already complete synchronously. */
        if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
                msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
        return (cm->cm_error);
}
  839 
/*
 * Release every resource the driver acquired, in reverse order of
 * acquisition: watchdog callout, control device, per-command DMA maps,
 * interrupt, then each DMA area (sense, frames, comms) by unloading
 * the map, freeing the memory, and destroying the tag, and finally the
 * locks.  Each step is guarded so this is safe to call from a
 * partially failed attach.
 */
void
mfi_free(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        int i;

        callout_drain(&sc->mfi_watchdog_callout);

        if (sc->mfi_cdev != NULL)
                destroy_dev(sc->mfi_cdev);

        if (sc->mfi_total_cmds != 0) {
                for (i = 0; i < sc->mfi_total_cmds; i++) {
                        cm = &sc->mfi_commands[i];
                        bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
                }
                free(sc->mfi_commands, M_MFIBUF);
        }

        if (sc->mfi_intr)
                bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
        if (sc->mfi_irq != NULL)
                bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
                    sc->mfi_irq);

        /* Sense buffer DMA area: unload, free, destroy tag. */
        if (sc->mfi_sense_busaddr != 0)
                bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
        if (sc->mfi_sense != NULL)
                bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
                    sc->mfi_sense_dmamap);
        if (sc->mfi_sense_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_sense_dmat);

        /* Command frame DMA area. */
        if (sc->mfi_frames_busaddr != 0)
                bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
        if (sc->mfi_frames != NULL)
                bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
                    sc->mfi_frames_dmamap);
        if (sc->mfi_frames_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_frames_dmat);

        /* Host/firmware communication area (reply queue). */
        if (sc->mfi_comms_busaddr != 0)
                bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
        if (sc->mfi_comms != NULL)
                bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
                    sc->mfi_comms_dmamap);
        if (sc->mfi_comms_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_comms_dmat);

        if (sc->mfi_buffer_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_buffer_dmat);
        if (sc->mfi_parent_dmat != NULL)
                bus_dma_tag_destroy(sc->mfi_parent_dmat);

        if (mtx_initialized(&sc->mfi_io_lock)) {
                mtx_destroy(&sc->mfi_io_lock);
                sx_destroy(&sc->mfi_config_lock);
        }

        return;
}
  901 
  902 static void
  903 mfi_startup(void *arg)
  904 {
  905         struct mfi_softc *sc;
  906 
  907         sc = (struct mfi_softc *)arg;
  908 
  909         config_intrhook_disestablish(&sc->mfi_ich);
  910 
  911         sc->mfi_enable_intr(sc);
  912         sx_xlock(&sc->mfi_config_lock);
  913         mtx_lock(&sc->mfi_io_lock);
  914         mfi_ldprobe(sc);
  915         mtx_unlock(&sc->mfi_io_lock);
  916         sx_xunlock(&sc->mfi_config_lock);
  917 }
  918 
/*
 * Interrupt handler.  Drains the firmware reply queue between the
 * consumer index (hw_ci) and producer index (hw_pi): each entry is a
 * command-array index ("context") identifying a completed command.
 * The ring has mfi_max_fw_cmds + 1 slots, hence the wrap test below.
 * After draining, the queue freeze is lifted and deferred I/O is
 * restarted.
 */
static void
mfi_intr(void *arg)
{
        struct mfi_softc *sc;
        struct mfi_command *cm;
        uint32_t pi, ci, context;

        sc = (struct mfi_softc *)arg;

        /* Shared-interrupt filter: nothing to do if it wasn't ours. */
        if (sc->mfi_check_clear_intr(sc))
                return;

        pi = sc->mfi_comms->hw_pi;
        ci = sc->mfi_comms->hw_ci;
        mtx_lock(&sc->mfi_io_lock);
        while (ci != pi) {
                context = sc->mfi_comms->hw_reply_q[ci];
                /* Ignore out-of-range contexts rather than indexing wild. */
                if (context < sc->mfi_max_fw_cmds) {
                        cm = &sc->mfi_commands[context];
                        mfi_remove_busy(cm);
                        cm->cm_error = 0;
                        mfi_complete(sc, cm);
                }
                if (++ci == (sc->mfi_max_fw_cmds + 1)) {
                        ci = 0;
                }
        }

        /* Publish the new consumer index back to the firmware. */
        sc->mfi_comms->hw_ci = ci;

        /* Give defered I/O a chance to run */
        if (sc->mfi_flags & MFI_FLAGS_QFRZN)
                sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
        mfi_startio(sc);
        mtx_unlock(&sc->mfi_io_lock);

        return;
}
  957 
/*
 * Tell the controller to flush and shut down via a polled
 * MFI_DCMD_CTRL_SHUTDOWN.  Any outstanding AEN command is aborted
 * first so it is not left pending across the shutdown.  Returns 0 on
 * success or the error from command allocation / mfi_mapcmd().
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
        struct mfi_dcmd_frame *dcmd;
        struct mfi_command *cm;
        int error;

        mtx_lock(&sc->mfi_io_lock);
        /* No data buffer: bufp == NULL, bufsize == 0. */
        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
        if (error) {
                mtx_unlock(&sc->mfi_io_lock);
                return (error);
        }

        if (sc->mfi_aen_cm != NULL)
                mfi_abort(sc, sc->mfi_aen_cm);

        dcmd = &cm->cm_frame->dcmd;
        dcmd->header.flags = MFI_FRAME_DIR_NONE;
        cm->cm_flags = MFI_CMD_POLLED;
        cm->cm_data = NULL;

        if ((error = mfi_mapcmd(sc, cm)) != 0) {
                device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
        }

        mfi_release_command(cm);
        mtx_unlock(&sc->mfi_io_lock);
        return (error);
}
  988 
/*
 * Enumerate the controller's logical disks with MFI_DCMD_LD_GET_LIST
 * and attach an mfid child (via mfi_add_ld()) for each target id not
 * already present on sc->mfi_ld_tqh.  Failures are logged and the probe
 * simply returns; it is retried on later probes.  Requires both the
 * config sx lock and the io lock.
 */
static void
mfi_ldprobe(struct mfi_softc *sc)
{
        struct mfi_frame_header *hdr;
        struct mfi_command *cm = NULL;
        struct mfi_ld_list *list = NULL;
        struct mfi_disk *ld;
        int error, i;

        sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
        mtx_assert(&sc->mfi_io_lock, MA_OWNED);

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
            (void **)&list, sizeof(*list));
        if (error)
                goto out;

        /* Not polled: sleep until the firmware answers. */
        cm->cm_flags = MFI_CMD_DATAIN;
        if (mfi_wait_command(sc, cm) != 0) {
                device_printf(sc->mfi_dev, "Failed to get device listing\n");
                goto out;
        }

        hdr = &cm->cm_frame->header;
        if (hdr->cmd_status != MFI_STAT_OK) {
                device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
                    hdr->cmd_status);
                goto out;
        }

        /* Add only targets we don't already track. */
        for (i = 0; i < list->ld_count; i++) {
                TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
                        if (ld->ld_id == list->ld_list[i].ld.v.target_id)
                                goto skip_add;
                }
                mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
        skip_add:;
        }
out:
        if (list)
                free(list, M_MFIBUF);
        if (cm)
                mfi_release_command(cm);

        return;
}
 1035 
 1036 /*
 1037  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 1038  * the bits in 24-31 are all set, then it is the number of seconds since
 1039  * boot.
 1040  */
 1041 static const char *
 1042 format_timestamp(uint32_t timestamp)
 1043 {
 1044         static char buffer[32];
 1045 
 1046         if ((timestamp & 0xff000000) == 0xff000000)
 1047                 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
 1048                     0x00ffffff);
 1049         else
 1050                 snprintf(buffer, sizeof(buffer), "%us", timestamp);
 1051         return (buffer);
 1052 }
 1053 
 1054 static const char *
 1055 format_class(int8_t class)
 1056 {
 1057         static char buffer[6];
 1058 
 1059         switch (class) {
 1060         case MFI_EVT_CLASS_DEBUG:
 1061                 return ("debug");
 1062         case MFI_EVT_CLASS_PROGRESS:
 1063                 return ("progress");
 1064         case MFI_EVT_CLASS_INFO:
 1065                 return ("info");
 1066         case MFI_EVT_CLASS_WARNING:
 1067                 return ("WARN");
 1068         case MFI_EVT_CLASS_CRITICAL:
 1069                 return ("CRIT");
 1070         case MFI_EVT_CLASS_FATAL:
 1071                 return ("FATAL");
 1072         case MFI_EVT_CLASS_DEAD:
 1073                 return ("DEAD");
 1074         default:
 1075                 snprintf(buffer, sizeof(buffer), "%d", class);
 1076                 return (buffer);
 1077         }
 1078 }
 1079 
/*
 * Log one firmware event to the console: sequence number, timestamp,
 * locale, class, and the firmware-supplied description string.  Note
 * that format_timestamp() and format_class() return static buffers.
 */
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{

        device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
            format_timestamp(detail->time), detail->class.members.locale,
            format_class(detail->class.members.class), detail->description);
}
 1088 
/*
 * Register (or widen) the single outstanding asynchronous-event-wait
 * command.  If an AEN command is already pending, its class/locale word
 * (stashed in mbox[1] of its DCMD frame) is compared with the new
 * request: if the existing registration already covers the new one,
 * nothing is done; otherwise the two are merged (union of locales,
 * minimum class) and the old command is aborted before re-issuing.
 * The new command completes via mfi_aen_complete().
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd;
        union mfi_evt current_aen, prior_aen;
        struct mfi_evt_detail *ed = NULL;
        int error = 0;

        current_aen.word = locale;
        if (sc->mfi_aen_cm != NULL) {
                /* mbox[1] of the pending frame holds its class/locale word. */
                prior_aen.word =
                    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
                /*
                 * Already covered: prior class is at least as permissive
                 * and prior locale mask is a superset of the new one.
                 */
                if (prior_aen.members.class <= current_aen.members.class &&
                    !((prior_aen.members.locale & current_aen.members.locale)
                    ^current_aen.members.locale)) {
                        return (0);
                } else {
                        /* Merge the requests, then re-register. */
                        prior_aen.members.locale |= current_aen.members.locale;
                        if (prior_aen.members.class
                            < current_aen.members.class)
                                current_aen.members.class =
                                    prior_aen.members.class;
                        mfi_abort(sc, sc->mfi_aen_cm);
                }
        }

        error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
            (void **)&ed, sizeof(*ed));
        if (error) {
                goto out;
        }

        /* mbox[0] = starting sequence number, mbox[1] = class/locale. */
        dcmd = &cm->cm_frame->dcmd;
        ((uint32_t *)&dcmd->mbox)[0] = seq;
        ((uint32_t *)&dcmd->mbox)[1] = locale;
        cm->cm_flags = MFI_CMD_DATAIN;
        cm->cm_complete = mfi_aen_complete;

        sc->mfi_aen_cm = cm;

        mfi_enqueue_ready(cm);
        mfi_startio(sc);

out:
        return (error);
}
 1136 
/*
 * Completion handler for the AEN wait command.  If the command was
 * aborted (cm_aen_abort set, or firmware status still 0xff) the event
 * is dropped; otherwise the event is decoded, poll()ers and SIGIO
 * subscribers on mfi_aen_pids are notified, and the next expected
 * sequence number is computed.  The command and its data buffer are
 * then released and, unless aborted, the AEN is re-armed at seq via
 * mfi_aen_setup().
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_softc *sc;
        struct mfi_evt_detail *detail;
        struct mfi_aen *mfi_aen_entry, *tmp;
        int seq = 0, aborted = 0;

        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;

        if (sc->mfi_aen_cm == NULL)
                return;

        /* 0xff is the "not yet completed" status set when the cmd was sent. */
        if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
                sc->mfi_aen_cm->cm_aen_abort = 0;
                aborted = 1;
        } else {
                sc->mfi_aen_triggered = 1;
                /* Wake anyone blocked in poll() on the control device. */
                if (sc->mfi_poll_waiting) {
                        sc->mfi_poll_waiting = 0;
                        selwakeup(&sc->mfi_select);
                }
                detail = cm->cm_data;
                /*
                 * XXX If this function is too expensive or is recursive, then
                 * events should be put onto a queue and processed later.
                 */
                mfi_decode_evt(sc, detail);
                seq = detail->seq + 1;
                /* Signal and drop every registered listener (one-shot). */
                TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
                        TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
                            aen_link);
                        PROC_LOCK(mfi_aen_entry->p);
                        psignal(mfi_aen_entry->p, SIGIO);
                        PROC_UNLOCK(mfi_aen_entry->p);
                        free(mfi_aen_entry, M_MFIBUF);
                }
        }

        free(cm->cm_data, M_MFIBUF);
        sc->mfi_aen_cm = NULL;
        /* Wake threads (e.g. mfi_abort callers) sleeping on the AEN slot. */
        wakeup(&sc->mfi_aen_cm);
        mfi_release_command(cm);

        /* set it up again so the driver can catch more events */
        if (!aborted) {
                mfi_aen_setup(sc, seq);
        }
}
 1188 
/* Events fetched per MFI_DCMD_CTRL_EVENT_GET batch. */
#define MAX_EVENTS 15

/*
 * Replay firmware event-log entries from start_seq up to (but not
 * including) stop_seq, decoding each via mfi_decode_evt().  Events are
 * fetched MAX_EVENTS at a time with polled MFI_DCMD_CTRL_EVENT_GET
 * commands until the firmware reports MFI_STAT_NOT_FOUND or stop_seq
 * is reached.  The log is a circular buffer, so stop_seq may be
 * numerically smaller than start_seq; see the wrap handling below.
 */
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
        struct mfi_command *cm;
        struct mfi_dcmd_frame *dcmd;
        struct mfi_evt_list *el;
        union mfi_evt class_locale;
        int error, i, seq, size;

        class_locale.members.reserved = 0;
        class_locale.members.locale = mfi_event_locale;
        class_locale.members.class  = mfi_event_class;

        /* mfi_evt_list already contains one mfi_evt_detail, hence - 1. */
        size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
                * (MAX_EVENTS - 1);
        el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
        if (el == NULL)
                return (ENOMEM);

        for (seq = start_seq;;) {
                if ((cm = mfi_dequeue_free(sc)) == NULL) {
                        free(el, M_MFIBUF);
                        return (EBUSY);
                }

                /* Hand-build the DCMD; mbox[0] = seq, mbox[1] = class/locale. */
                dcmd = &cm->cm_frame->dcmd;
                bzero(dcmd->mbox, MFI_MBOX_SIZE);
                dcmd->header.cmd = MFI_CMD_DCMD;
                dcmd->header.timeout = 0;
                dcmd->header.data_len = size;
                dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
                ((uint32_t *)&dcmd->mbox)[0] = seq;
                ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
                cm->cm_sg = &dcmd->sgl;
                cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
                cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
                cm->cm_data = el;
                cm->cm_len = size;

                if ((error = mfi_mapcmd(sc, cm)) != 0) {
                        device_printf(sc->mfi_dev,
                            "Failed to get controller entries\n");
                        mfi_release_command(cm);
                        break;
                }

                /* Make the DMA'd event list visible before reading it. */
                bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

                /* NOT_FOUND: we've drained the log; normal termination. */
                if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
                        mfi_release_command(cm);
                        break;
                }
                if (dcmd->header.cmd_status != MFI_STAT_OK) {
                        device_printf(sc->mfi_dev,
                            "Error %d fetching controller entries\n",
                            dcmd->header.cmd_status);
                        mfi_release_command(cm);
                        break;
                }
                mfi_release_command(cm);

                for (i = 0; i < el->count; i++) {
                        /*
                         * If this event is newer than 'stop_seq' then
                         * break out of the loop.  Note that the log
                         * is a circular buffer so we have to handle
                         * the case that our stop point is earlier in
                         * the buffer than our start point.
                         */
                        if (el->event[i].seq >= stop_seq) {
                                if (start_seq <= stop_seq)
                                        break;
                                else if (el->event[i].seq < start_seq)
                                        break;
                        }
                        mfi_decode_evt(sc, &el->event[i]);
                }
                /*
                 * NOTE(review): assumes a MFI_STAT_OK reply carries at
                 * least one event (el->count >= 1); el->count == 0 would
                 * index event[-1] here.  Verify against firmware behavior.
                 */
                seq = el->event[el->count - 1].seq + 1;
        }

        free(el, M_MFIBUF);
        return (0);
}
 1276 
 1277 static int
 1278 mfi_add_ld(struct mfi_softc *sc, int id)
 1279 {
 1280         struct mfi_command *cm;
 1281         struct mfi_dcmd_frame *dcmd = NULL;
 1282         struct mfi_ld_info *ld_info = NULL;
 1283         int error;
 1284 
 1285         mtx_assert(&sc->mfi_io_lock, MA_OWNED);
 1286 
 1287         error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
 1288             (void **)&ld_info, sizeof(*ld_info));
 1289         if (error) {
 1290                 device_printf(sc->mfi_dev,
 1291                     "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
 1292                 if (ld_info)
 1293                         free(ld_info, M_MFIBUF);
 1294                 return (error);
 1295         }
 1296         cm->cm_flags = MFI_CMD_DATAIN;
 1297         dcmd = &cm->cm_frame->dcmd;
 1298         dcmd->mbox[0] = id;
 1299         if (mfi_wait_command(sc, cm) != 0) {
 1300                 device_printf(sc->mfi_dev,
 1301                     "Failed to get logical drive: %d\n", id);
 1302                 free(ld_info, M_MFIBUF);
 1303                 return (0);
 1304         }
 1305 
 1306         mfi_add_ld_complete(cm);
 1307         return (0);
 1308 }
 1309 
/*
 * Second half of logical-drive attach.  Takes ownership of the
 * command's ld_info buffer (cm_private): on firmware error both the
 * buffer and command are freed; otherwise the command is released and
 * an "mfid" child device is added with ld_info as its ivars.  The io
 * lock is dropped and Giant taken around the newbus calls, then the
 * io lock is reacquired before returning (callers hold it).
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
        struct mfi_frame_header *hdr;
        struct mfi_ld_info *ld_info;
        struct mfi_softc *sc;
        device_t child;

        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;
        ld_info = cm->cm_private;

        if (hdr->cmd_status != MFI_STAT_OK) {
                free(ld_info, M_MFIBUF);
                mfi_release_command(cm);
                return;
        }
        mfi_release_command(cm);

        /* Newbus requires Giant; can't hold the io mutex across it. */
        mtx_unlock(&sc->mfi_io_lock);
        mtx_lock(&Giant);
        if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
                device_printf(sc->mfi_dev, "Failed to add logical disk\n");
                free(ld_info, M_MFIBUF);
                mtx_unlock(&Giant);
                mtx_lock(&sc->mfi_io_lock);
                return;
        }

        /* ld_info ownership passes to the child via its ivars. */
        device_set_ivars(child, ld_info);
        device_set_desc(child, "MFI Logical Disk");
        bus_generic_attach(sc->mfi_dev);
        mtx_unlock(&Giant);
        mtx_lock(&sc->mfi_io_lock);
}
 1345 
/*
 * Build an LD read/write command from the next queued bio.  Returns
 * NULL when either no free command or no pending bio is available;
 * otherwise returns a fully initialized command whose completion
 * handler is mfi_bio_complete().  The target id was stashed in
 * bio_driver1 by the disk layer.
 */
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
        struct mfi_io_frame *io;
        struct mfi_command *cm;
        struct bio *bio;
        int flags, blkcount;

        if ((cm = mfi_dequeue_free(sc)) == NULL)
                return (NULL);

        if ((bio = mfi_dequeue_bio(sc)) == NULL) {
                mfi_release_command(cm);
                return (NULL);
        }

        io = &cm->cm_frame->io;
        /* Low two bits of bio_cmd carry the read/write opcode. */
        switch (bio->bio_cmd & 0x03) {
        case BIO_READ:
                io->header.cmd = MFI_CMD_LD_READ;
                flags = MFI_CMD_DATAIN;
                break;
        case BIO_WRITE:
                io->header.cmd = MFI_CMD_LD_WRITE;
                flags = MFI_CMD_DATAOUT;
                break;
        default:
                panic("Invalid bio command");
        }

        /* Cheat with the sector length to avoid a non-constant division */
        blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
        io->header.target_id = (uintptr_t)bio->bio_driver1;
        io->header.timeout = 0;
        io->header.flags = 0;
        io->header.sense_len = MFI_SENSE_LEN;
        /* data_len for LD I/O is a block count, not a byte count. */
        io->header.data_len = blkcount;
        io->sense_addr_lo = cm->cm_sense_busaddr;
        io->sense_addr_hi = 0;
        /* 64-bit LBA split across the hi/lo frame fields. */
        io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
        io->lba_lo = bio->bio_pblkno & 0xffffffff;
        cm->cm_complete = mfi_bio_complete;
        cm->cm_private = bio;
        cm->cm_data = bio->bio_data;
        cm->cm_len = bio->bio_bcount;
        cm->cm_sg = &io->sgl;
        cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
        cm->cm_flags = flags;
        return (cm);
}
 1396 
/*
 * Completion handler for bio-originated commands.  Translates a
 * firmware or SCSI error (or an internal cm_error) into BIO_ERROR on
 * the bio, releases the command, and passes the bio back to the disk
 * layer via mfi_disk_complete().
 */
static void
mfi_bio_complete(struct mfi_command *cm)
{
        struct bio *bio;
        struct mfi_frame_header *hdr;
        struct mfi_softc *sc;

        bio = cm->cm_private;
        hdr = &cm->cm_frame->header;
        sc = cm->cm_sc;

        if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
                bio->bio_flags |= BIO_ERROR;
                bio->bio_error = EIO;
                device_printf(sc->mfi_dev, "I/O error, status= %d "
                    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
                mfi_print_sense(cm->cm_sc, cm->cm_sense);
        } else if (cm->cm_error != 0) {
                /* Internal (e.g. DMA mapping) failure; no bio_error set here. */
                bio->bio_flags |= BIO_ERROR;
        }

        mfi_release_command(cm);
        mfi_disk_complete(bio);
}
 1421 
/*
 * Pump the I/O queues: repeatedly pick the next command — first from
 * the ready queue, then from any pending CAM ccb, then from the bio
 * queue — and submit it via mfi_mapcmd().  Stops when the controller
 * is resource-frozen (MFI_FLAGS_QFRZN), when no work remains, or when
 * a submit fails (the command is requeued for a later retry).
 */
void
mfi_startio(struct mfi_softc *sc)
{
        struct mfi_command *cm;
        struct ccb_hdr *ccbh;

        for (;;) {
                /* Don't bother if we're short on resources */
                if (sc->mfi_flags & MFI_FLAGS_QFRZN)
                        break;

                /* Try a command that has already been prepared */
                cm = mfi_dequeue_ready(sc);

                if (cm == NULL) {
                        if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
                                cm = sc->mfi_cam_start(ccbh);
                }

                /* Nope, so look for work on the bioq */
                if (cm == NULL)
                        cm = mfi_bio_command(sc);

                /* No work available, so exit */
                if (cm == NULL)
                        break;

                /* Send the command to the controller */
                if (mfi_mapcmd(sc, cm) != 0) {
                        mfi_requeue_ready(cm);
                        break;
                }
        }
}
 1456 
/*
 * Submit a command, mapping its data buffer for DMA first if it has
 * one.  The busdma callback (mfi_data_cb) fills in the SG list and
 * sends the frame.  A deferred mapping (EINPROGRESS, only possible for
 * non-polled commands) freezes the queue and is treated as success;
 * the callback will fire later.  Data-less commands are sent directly.
 * Must be called with the io lock held.
 */
static int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
        int error, polled;

        mtx_assert(&sc->mfi_io_lock, MA_OWNED);

        if (cm->cm_data != NULL) {
                /* Polled commands can't wait for deferred mappings. */
                polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
                error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
                    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
                if (error == EINPROGRESS) {
                        sc->mfi_flags |= MFI_FLAGS_QFRZN;
                        return (0);
                }
        } else {
                error = mfi_send_frame(sc, cm);
        }

        return (error);
}
 1478 
/*
 * busdma callback: translate the DMA segment list into the frame's SG
 * entries (32- or 64-bit format depending on MFI_FLAGS_SG64), set the
 * direction flags, pre-sync the buffer, account for the extra frames
 * the SG list occupies, and send the frame.  On a mapping error the
 * command is failed through mfi_complete() instead.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct mfi_frame_header *hdr;
        struct mfi_command *cm;
        union mfi_sgl *sgl;
        struct mfi_softc *sc;
        int i, dir;

        cm = (struct mfi_command *)arg;
        sc = cm->cm_sc;
        hdr = &cm->cm_frame->header;
        sgl = cm->cm_sg;

        if (error) {
                printf("error %d in callback\n", error);
                cm->cm_error = error;
                mfi_complete(sc, cm);
                return;
        }

        /* Fill the SG list in whichever format the controller expects. */
        if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
                for (i = 0; i < nsegs; i++) {
                        sgl->sg32[i].addr = segs[i].ds_addr;
                        sgl->sg32[i].len = segs[i].ds_len;
                }
        } else {
                for (i = 0; i < nsegs; i++) {
                        sgl->sg64[i].addr = segs[i].ds_addr;
                        sgl->sg64[i].len = segs[i].ds_len;
                }
                hdr->flags |= MFI_FRAME_SGL64;
        }
        hdr->sg_count = nsegs;

        /* Mirror the command's data direction into sync ops and flags. */
        dir = 0;
        if (cm->cm_flags & MFI_CMD_DATAIN) {
                dir |= BUS_DMASYNC_PREREAD;
                hdr->flags |= MFI_FRAME_DIR_READ;
        }
        if (cm->cm_flags & MFI_CMD_DATAOUT) {
                dir |= BUS_DMASYNC_PREWRITE;
                hdr->flags |= MFI_FRAME_DIR_WRITE;
        }
        bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
        cm->cm_flags |= MFI_CMD_MAPPED;

        /*
         * Instead of calculating the total number of frames in the
         * compound frame, it's already assumed that there will be at
         * least 1 frame, so don't compensate for the modulo of the
         * following division.
         */
        cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
        cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

        mfi_send_frame(sc, cm);

        return;
}
 1539 
/*
 * Issue a frame to the controller.  Asynchronous commands are placed
 * on the busy queue and return immediately; polled commands busy-wait
 * up to MFI_POLL_TIMEOUT_SECS for the firmware to update the status
 * byte, returning ETIMEDOUT if it never does.
 */
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;	/* poll budget, in ms */

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		/* Async: timestamp for the timeout watchdog, queue busy. */
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		/* Polled: 0xff marks "not yet done"; skip the reply queue. */
		hdr->cmd_status = 0xff;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == 0xff) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	/* Status still 0xff after the full window: firmware never answered. */
	if (hdr->cmd_status == 0xff) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
			      "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
 1592 
 1593 static void
 1594 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
 1595 {
 1596         int dir;
 1597 
 1598         if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
 1599                 dir = 0;
 1600                 if (cm->cm_flags & MFI_CMD_DATAIN)
 1601                         dir |= BUS_DMASYNC_POSTREAD;
 1602                 if (cm->cm_flags & MFI_CMD_DATAOUT)
 1603                         dir |= BUS_DMASYNC_POSTWRITE;
 1604 
 1605                 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
 1606                 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
 1607                 cm->cm_flags &= ~MFI_CMD_MAPPED;
 1608         }
 1609 
 1610         cm->cm_flags |= MFI_CMD_COMPLETED;
 1611 
 1612         if (cm->cm_complete != NULL)
 1613                 cm->cm_complete(cm);
 1614         else
 1615                 wakeup(cm);
 1616 }
 1617 
/*
 * Abort an outstanding command by issuing a polled MFI_CMD_ABORT frame
 * that references the victim's context and bus address, then wait for
 * the AEN command to drain.  Returns EBUSY if no free command slot is
 * available; otherwise 0 (the abort itself is best-effort).
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Point the abort frame at the command being aborted. */
	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi = 0;
	cm->cm_data = NULL;
	/* Polled so the abort is issued synchronously under the io lock. */
	cm->cm_flags = MFI_CMD_POLLED;

	/*
	 * NOTE(review): sc->mfi_aen_cm is dereferenced unconditionally here
	 * even though the loop below tolerates mfi_aen_cm == NULL; this
	 * assumes an AEN command is always outstanding when mfi_abort() is
	 * called -- confirm against the callers.
	 */
	sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_release_command(cm);

	/* Wait (up to 5 x 5s) for the AEN completion path to clear the slot. */
	while (i < 5 && sc->mfi_aen_cm != NULL) {
		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
		i++;
	}

	return (0);
}
 1651 
/*
 * Write 'len' bytes at 'virt' to logical disk 'id' starting at sector
 * 'lba'.  Used by the kernel crash-dump path: the command is issued
 * polled and the DMA map is synced/unloaded by hand here rather than
 * through mfi_complete().  Returns EBUSY if no command is free,
 * otherwise the result of the polled issue.
 */
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is expressed in sectors, rounded up. */
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = cm->cm_sense_busaddr;
	io->sense_addr_hi = 0;
	/* Split the 64-bit LBA into the frame's hi/lo halves. */
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	/* Polled issue; unmap manually since mfi_complete() does not run. */
	error = mfi_mapcmd(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
 1687 
 1688 static int
 1689 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 1690 {
 1691         struct mfi_softc *sc;
 1692         int error;
 1693 
 1694         sc = dev->si_drv1;
 1695 
 1696         mtx_lock(&sc->mfi_io_lock);
 1697         if (sc->mfi_detaching)
 1698                 error = ENXIO;
 1699         else {
 1700                 sc->mfi_flags |= MFI_FLAGS_OPEN;
 1701                 error = 0;
 1702         }
 1703         mtx_unlock(&sc->mfi_io_lock);
 1704 
 1705         return (error);
 1706 }
 1707 
 1708 static int
 1709 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
 1710 {
 1711         struct mfi_softc *sc;
 1712         struct mfi_aen *mfi_aen_entry, *tmp;
 1713 
 1714         sc = dev->si_drv1;
 1715 
 1716         mtx_lock(&sc->mfi_io_lock);
 1717         sc->mfi_flags &= ~MFI_FLAGS_OPEN;
 1718 
 1719         TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
 1720                 if (mfi_aen_entry->p == curproc) {
 1721                         TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
 1722                             aen_link);
 1723                         free(mfi_aen_entry, M_MFIBUF);
 1724                 }
 1725         }
 1726         mtx_unlock(&sc->mfi_io_lock);
 1727         return (0);
 1728 }
 1729 
 1730 static int
 1731 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
 1732 {
 1733 
 1734         switch (opcode) {
 1735         case MFI_DCMD_LD_DELETE:
 1736         case MFI_DCMD_CFG_ADD:
 1737         case MFI_DCMD_CFG_CLEAR:
 1738                 sx_xlock(&sc->mfi_config_lock);
 1739                 return (1);
 1740         default:
 1741                 return (0);
 1742         }
 1743 }
 1744 
 1745 static void
 1746 mfi_config_unlock(struct mfi_softc *sc, int locked)
 1747 {
 1748 
 1749         if (locked)
 1750                 sx_xunlock(&sc->mfi_config_lock);
 1751 }
 1752 
/* Perform pre-issue checks on commands from userland and possibly veto them. */
/*
 * For LD_DELETE: disable the target disk (ENOENT if it no longer
 * exists).  For CFG_CLEAR: disable every disk, rolling back the ones
 * already disabled if any disable fails.  Other opcodes pass through.
 * Returns 0 to allow the command, or an errno to veto it.
 */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		/* TAILQ_FOREACH leaves ld == NULL when no id matches. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			/* Re-enable the disks disabled before the failure. */
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	default:
		break;
	}
	return (error);
}
 1792 
/* Perform post-issue checks on commands from userland. */
/*
 * After a config-changing command completes: on success, detach the
 * affected disk device(s) (dropping the io lock and taking Giant for
 * newbus work); on failure, re-enable the disks disabled in the
 * pre-check.  CFG_ADD / FOREIGN_IMPORT trigger a re-probe for new
 * logical disks.
 */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume dissappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			/* device_delete_child() requires Giant, not io_lock. */
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			/* SAFE variant: deleting children unlinks them. */
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else {
			/* Clear failed: undo the pre-check disables. */
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	}
}
 1837 
 1838 static int
 1839 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
 1840 {
 1841         struct mfi_command *cm;
 1842         struct mfi_dcmd_frame *dcmd;
 1843         void *ioc_buf = NULL;
 1844         uint32_t context;
 1845         int error = 0, locked;
 1846 
 1847 
 1848         if (ioc->buf_size > 0) {
 1849                 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
 1850                 if (ioc_buf == NULL) {
 1851                         return (ENOMEM);
 1852                 }
 1853                 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
 1854                 if (error) {
 1855                         device_printf(sc->mfi_dev, "failed to copyin\n");
 1856                         free(ioc_buf, M_MFIBUF);
 1857                         return (error);
 1858                 }
 1859         }
 1860 
 1861         locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
 1862 
 1863         mtx_lock(&sc->mfi_io_lock);
 1864         while ((cm = mfi_dequeue_free(sc)) == NULL)
 1865                 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
 1866 
 1867         /* Save context for later */
 1868         context = cm->cm_frame->header.context;
 1869 
 1870         dcmd = &cm->cm_frame->dcmd;
 1871         bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
 1872 
 1873         cm->cm_sg = &dcmd->sgl;
 1874         cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
 1875         cm->cm_data = ioc_buf;
 1876         cm->cm_len = ioc->buf_size;
 1877 
 1878         /* restore context */
 1879         cm->cm_frame->header.context = context;
 1880 
 1881         /* Cheat since we don't know if we're writing or reading */
 1882         cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
 1883 
 1884         error = mfi_check_command_pre(sc, cm);
 1885         if (error)
 1886                 goto out;
 1887 
 1888         error = mfi_wait_command(sc, cm);
 1889         if (error) {
 1890                 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
 1891                 goto out;
 1892         }
 1893         bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
 1894         mfi_check_command_post(sc, cm);
 1895 out:
 1896         mfi_release_command(cm);
 1897         mtx_unlock(&sc->mfi_io_lock);
 1898         mfi_config_unlock(sc, locked);
 1899         if (ioc->buf_size > 0)
 1900                 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
 1901         if (ioc_buf)
 1902                 free(ioc_buf, M_MFIBUF);
 1903         return (error);
 1904 }
 1905 
/*
 * Convert a pointer value received from a 32-bit userland into a
 * kernel void pointer.  A no-op on 32-bit kernels.
 */
#ifdef __amd64__
#define PTRIN(p)                ((void *)(uintptr_t)(p))
#else
#define PTRIN(p)                (p)
#endif
 1911 
/*
 * Control-device ioctl dispatcher.  Handles queue statistics
 * (MFIIO_STATS), logical-disk queries (MFIIO_QUERY_DISK), raw firmware
 * command pass-through in native and 32-bit layouts (MFI_CMD /
 * MFI_CMD32), AEN registration (MFI_SET_AEN), the Linux ioctl shims,
 * and DCMD pass-through (MFIIO_PASSTHRU / MFIIO_PASSTHRU32).
 */
static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
#ifdef __amd64__
	struct mfi_ioc_packet32 *ioc32;
#endif
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp;
	int i;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef __amd64__
	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
	struct mfi_ioc_passthru iop_swab;
#endif
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	switch (cmd) {
	case MFIIO_STATS:
		/* Copy out one of the driver's per-queue statistics blocks. */
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		/* Report presence/open state/device name of a logical disk. */
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			mtx_unlock(&sc->mfi_io_lock);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		mtx_unlock(&sc->mfi_io_lock);
		break;
	}
	case MFI_CMD:
#ifdef __amd64__
	case MFI_CMD32:
#endif
		{
		devclass_t devclass;
		ioc = (struct mfi_ioc_packet *)arg;
		int adapter;

		/* Redirect to the requested adapter when issued on unit 0. */
		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MFI_DCMD_FRAME_SIZE);  /* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			/*
			 * NOTE(review): M_WAITOK malloc(9) never returns
			 * NULL, so this branch is dead; were it reachable
			 * it would goto out with error still 0.
			 */
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's scatter/gather buffers into 'data'. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef __amd64__
				if (cmd == MFI_CMD) {
					/* Native */
					error = copyin(ioc->mfi_sgl[i].iov_base,
					       temp,
					       ioc->mfi_sgl[i].iov_len);
				} else {
					void *temp_convert;
					/* 32bit */
					ioc32 = (struct mfi_ioc_packet32 *)ioc;
					temp_convert =
					    PTRIN(ioc32->mfi_sgl[i].iov_base);
					error = copyin(temp_convert,
					       temp,
					       ioc32->mfi_sgl[i].iov_len);
				}
#else
				error = copyin(ioc->mfi_sgl[i].iov_base,
				       temp,
				       ioc->mfi_sgl[i].iov_len);
#endif
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				/*
				 * NOTE(review): for MFI_CMD32 this advance
				 * reads iov_len through the native 64-bit
				 * packet layout -- verify the 32-bit layout
				 * yields the same value here.
				 */
				temp = &temp[ioc->mfi_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi = 0;
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter the result back out to the user's buffers. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef __amd64__
				if (cmd == MFI_CMD) {
					/* Native */
					error = copyout(temp,
						ioc->mfi_sgl[i].iov_base,
						ioc->mfi_sgl[i].iov_len);
				} else {
					void *temp_convert;
					/* 32bit */
					ioc32 = (struct mfi_ioc_packet32 *)ioc;
					temp_convert =
					    PTRIN(ioc32->mfi_sgl[i].iov_base);
					error = copyout(temp,
						temp_convert,
						ioc32->mfi_sgl[i].iov_len);
				}
#else
				error = copyout(temp,
					ioc->mfi_sgl[i].iov_base,
					ioc->mfi_sgl[i].iov_len);
#endif
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[ioc->mfi_sgl[i].iov_len];
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			/*
			 * NOTE(review): mfi_sense_off is read through the
			 * native layout even for MFI_CMD32 -- confirm the
			 * 32-bit packet keeps this field at the same offset.
			 */
			bcopy(&((struct mfi_ioc_packet*)arg)
			    ->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware's completion status back to the user. */
		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);

		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_packet l_ioc;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_ioc, sizeof(l_ioc));
			if (error)
				return (error);
			adapter = l_ioc.lioc_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			/* Re-dispatch on the target adapter's cdev. */
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_aen l_aen;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_aen, sizeof(l_aen));
			if (error)
				return (error);
			adapter = l_aen.laen_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
#ifdef __amd64__
	case MFIIO_PASSTHRU32:
		/* Widen the 32-bit request into a native one, then fall in. */
		iop_swab.ioc_frame	= iop32->ioc_frame;
		iop_swab.buf_size	= iop32->buf_size;
		iop_swab.buf		= PTRIN(iop32->buf);
		iop			= &iop_swab;
		/* FALLTHROUGH */
#endif
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
#ifdef __amd64__
		if (cmd == MFIIO_PASSTHRU32)
			iop32->ioc_frame = iop_swab.ioc_frame;
#endif
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
 2235 
/*
 * Linux-compatibility ioctl backend (shim for the Linux megaraid_sas
 * userland tools).  Handles firmware passthrough frames and AEN
 * registration using the Linux ioctl packet layouts.
 *
 * Returns 0 on success or an errno value (EINVAL on too many SGEs,
 * EBUSY when no command slot is free, ENOENT for unknown cmd, or a
 * copyin/copyout/command error).
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		/* Pull the Linux-format ioctl packet in from userland. */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Bound the user-supplied SGE count before using it. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		/* Grab a free command slot; the free queue is lock-protected. */
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);
		/* No config lock held yet; 'out' unconditionally unlocks. */
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		/* Translate frame flags into driver command flags. */
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			/* Kernel bounce buffer for the user data phase. */
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			/*
			 * NOTE(review): malloc(9) with M_WAITOK cannot
			 * return NULL, so this check is dead code.
			 */
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* DATAOUT: gather the user SG list into the bounce buffer. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		/* DCMDs may need the config lock, released at 'out'. */
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		/* Point passthrough frames at this command's sense buffer. */
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi = 0;
		}

		/* Validate, issue, and wait for completion under the io lock. */
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		/* DATAIN: scatter the bounce buffer back to the user SG list. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware completion status back to userland. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		/* Common cleanup: config lock, bounce buffer, command slot. */
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		/* M_WAITOK: cannot fail, so the NULL test below is dead code. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			/* Remember which process to notify on AEN delivery. */
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			/* Registration failed: undo the pid-list insertion. */
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
 2434 
 2435 static int
 2436 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
 2437 {
 2438         struct mfi_softc *sc;
 2439         int revents = 0;
 2440 
 2441         sc = dev->si_drv1;
 2442 
 2443         if (poll_events & (POLLIN | POLLRDNORM)) {
 2444                 if (sc->mfi_aen_triggered != 0) {
 2445                         revents |= poll_events & (POLLIN | POLLRDNORM);
 2446                         sc->mfi_aen_triggered = 0;
 2447                 }
 2448                 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
 2449                         revents |= POLLERR;
 2450                 }
 2451         }
 2452 
 2453         if (revents == 0) {
 2454                 if (poll_events & (POLLIN | POLLRDNORM)) {
 2455                         sc->mfi_poll_waiting = 1;
 2456                         selrecord(td, &sc->mfi_select);
 2457                 }
 2458         }
 2459 
 2460         return revents;
 2461 }
 2462 
 2463 
 2464 static void
 2465 mfi_dump_all(void)
 2466 {
 2467         struct mfi_softc *sc;
 2468         struct mfi_command *cm;
 2469         devclass_t dc;
 2470         time_t deadline;
 2471         int timedout;
 2472         int i;
 2473 
 2474         dc = devclass_find("mfi");
 2475         if (dc == NULL) {
 2476                 printf("No mfi dev class\n");
 2477                 return;
 2478         }
 2479 
 2480         for (i = 0; ; i++) {
 2481                 sc = devclass_get_softc(dc, i);
 2482                 if (sc == NULL)
 2483                         break;
 2484                 device_printf(sc->mfi_dev, "Dumping\n\n");
 2485                 timedout = 0;
 2486                 deadline = time_uptime - MFI_CMD_TIMEOUT;
 2487                 mtx_lock(&sc->mfi_io_lock);
 2488                 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 2489                         if (cm->cm_timestamp < deadline) {
 2490                                 device_printf(sc->mfi_dev,
 2491                                     "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
 2492                                     (int)(time_uptime - cm->cm_timestamp));
 2493                                 MFI_PRINT_CMD(cm);
 2494                                 timedout++;
 2495                         }
 2496                 }
 2497 
 2498 #if 0
 2499                 if (timedout)
 2500                         MFI_DUMP_CMDS(SC);
 2501 #endif
 2502 
 2503                 mtx_unlock(&sc->mfi_io_lock);
 2504         }
 2505 
 2506         return;
 2507 }
 2508 
 2509 static void
 2510 mfi_timeout(void *data)
 2511 {
 2512         struct mfi_softc *sc = (struct mfi_softc *)data;
 2513         struct mfi_command *cm;
 2514         time_t deadline;
 2515         int timedout = 0;
 2516 
 2517         deadline = time_uptime - MFI_CMD_TIMEOUT;
 2518         mtx_lock(&sc->mfi_io_lock);
 2519         TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
 2520                 if (sc->mfi_aen_cm == cm)
 2521                         continue;
 2522                 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
 2523                         device_printf(sc->mfi_dev,
 2524                             "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
 2525                             (int)(time_uptime - cm->cm_timestamp));
 2526                         MFI_PRINT_CMD(cm);
 2527                         MFI_VALIDATE_CMD(sc, cm);
 2528                         timedout++;
 2529                 }
 2530         }
 2531 
 2532 #if 0
 2533         if (timedout)
 2534                 MFI_DUMP_CMDS(SC);
 2535 #endif
 2536 
 2537         mtx_unlock(&sc->mfi_io_lock);
 2538 
 2539         callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
 2540             mfi_timeout, sc);
 2541 
 2542         if (0)
 2543                 mfi_dump_all();
 2544         return;
 2545 }

Cache object: afa596a338e56b3a09fbbfe0174bebab


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.