FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/mfi.c

    1 /* $NetBSD: mfi.c,v 1.79 2022/05/24 20:50:19 andvar Exp $ */
    2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
    3 
    4 /*
    5  * Copyright (c) 2012 Manuel Bouyer.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
   30  *
   31  * Permission to use, copy, modify, and distribute this software for any
   32  * purpose with or without fee is hereby granted, provided that the above
   33  * copyright notice and this permission notice appear in all copies.
   34  *
   35  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   36  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   37  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   38  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   39  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   40  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   41  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   42  */
   43 
   44  /*-
   45  * Redistribution and use in source and binary forms, with or without
   46  * modification, are permitted provided that the following conditions
   47  * are met:
   48  *
   49  *            Copyright 1994-2009 The FreeBSD Project.
   50  *            All rights reserved.
   51  *
   52  * 1. Redistributions of source code must retain the above copyright
   53  *    notice, this list of conditions and the following disclaimer.
   54  * 2. Redistributions in binary form must reproduce the above copyright
   55  *    notice, this list of conditions and the following disclaimer in the
   56  *    documentation and/or other materials provided with the distribution.
   57  *
   58  *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
   59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   60  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   61  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
   62  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   63  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   64  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   65  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
   66  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   67  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   68  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   69  *
   70  * The views and conclusions contained in the software and documentation
   71  * are those of the authors and should not be interpreted as representing
   72  * official policies, either expressed or implied, of the FreeBSD Project.
   73  */
   74 
   75 #include <sys/cdefs.h>
   76 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.79 2022/05/24 20:50:19 andvar Exp $");
   77 
   78 #include "bio.h"
   79 
   80 #include <sys/param.h>
   81 #include <sys/systm.h>
   82 #include <sys/buf.h>
   83 #include <sys/ioctl.h>
   84 #include <sys/device.h>
   85 #include <sys/kernel.h>
   86 #include <sys/malloc.h>
   87 #include <sys/proc.h>
   88 #include <sys/cpu.h>
   89 #include <sys/conf.h>
   90 #include <sys/kauth.h>
   91 
   92 #include <uvm/uvm_param.h>
   93 
   94 #include <sys/bus.h>
   95 
   96 #include <dev/scsipi/scsipi_all.h>
   97 #include <dev/scsipi/scsi_all.h>
   98 #include <dev/scsipi/scsi_spc.h>
   99 #include <dev/scsipi/scsipi_disk.h>
  100 #include <dev/scsipi/scsi_disk.h>
  101 #include <dev/scsipi/scsiconf.h>
  102 
  103 #include <dev/ic/mfireg.h>
  104 #include <dev/ic/mfivar.h>
  105 #include <dev/ic/mfiio.h>
  106 
  107 #if NBIO > 0
  108 #include <dev/biovar.h>
  109 #endif /* NBIO > 0 */
  110 
  111 #include "ioconf.h"
  112 
  113 #ifdef MFI_DEBUG
  114 uint32_t        mfi_debug = 0
  115 /*                  | MFI_D_CMD */
  116 /*                  | MFI_D_INTR */
  117 /*                  | MFI_D_MISC */
  118 /*                  | MFI_D_DMA */
  119 /*                  | MFI_D_IOCTL */
  120 /*                  | MFI_D_RW */
  121 /*                  | MFI_D_MEM */
  122 /*                  | MFI_D_CCB */
  123 /*                  | MFI_D_SYNC */
  124                 ;
  125 #endif
  126 
  127 static void             mfi_scsipi_request(struct scsipi_channel *,
  128                                 scsipi_adapter_req_t, void *);
  129 static void             mfiminphys(struct buf *bp);
  130 
  131 static struct mfi_ccb   *mfi_get_ccb(struct mfi_softc *);
  132 static void             mfi_put_ccb(struct mfi_ccb *);
  133 static int              mfi_init_ccb(struct mfi_softc *);
  134 
  135 static struct mfi_mem   *mfi_allocmem(struct mfi_softc *, size_t);
  136 static void             mfi_freemem(struct mfi_softc *, struct mfi_mem **);
  137 
  138 static int              mfi_transition_firmware(struct mfi_softc *);
  139 static int              mfi_initialize_firmware(struct mfi_softc *);
  140 static int              mfi_get_info(struct mfi_softc *);
  141 static int              mfi_get_bbu(struct mfi_softc *,
  142                             struct mfi_bbu_status *);
  143 /* return codes for mfi_get_bbu */
  144 #define MFI_BBU_GOOD    0
  145 #define MFI_BBU_BAD     1
  146 #define MFI_BBU_UNKNOWN 2
  147 static uint32_t         mfi_read(struct mfi_softc *, bus_size_t);
  148 static void             mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
  149 static int              mfi_poll(struct mfi_ccb *);
  150 static int              mfi_create_sgl(struct mfi_ccb *, int);
  151 
  152 /* commands */
  153 static int              mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
  154 static int              mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
  155                                 uint64_t, uint32_t);
  156 static void             mfi_scsi_ld_done(struct mfi_ccb *);
  157 static void             mfi_scsi_xs_done(struct mfi_ccb *, int, int);
  158 static int              mfi_mgmt_internal(struct mfi_softc *, uint32_t,
  159                             uint32_t, uint32_t, void *, const union mfi_mbox *,
  160                             bool);
  161 static int              mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
  162                             uint32_t, uint32_t, uint32_t, void *,
  163                             const union mfi_mbox *);
  164 static void             mfi_mgmt_done(struct mfi_ccb *);
  165 
  166 #if NBIO > 0
  167 static int              mfi_ioctl(device_t, u_long, void *);
  168 static int              mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
  169 static int              mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
  170 static int              mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
  171 static int              mfi_ioctl_alarm(struct mfi_softc *,
  172                                 struct bioc_alarm *);
  173 static int              mfi_ioctl_blink(struct mfi_softc *sc,
  174                                 struct bioc_blink *);
  175 static int              mfi_ioctl_setstate(struct mfi_softc *,
  176                                 struct bioc_setstate *);
  177 static int              mfi_bio_hs(struct mfi_softc *, int, int, void *);
  178 static int              mfi_create_sensors(struct mfi_softc *);
  179 static int              mfi_destroy_sensors(struct mfi_softc *);
  180 static void             mfi_sensor_refresh(struct sysmon_envsys *,
  181                                 envsys_data_t *);
  182 #endif /* NBIO > 0 */
  183 static bool             mfi_shutdown(device_t, int);
  184 static bool             mfi_suspend(device_t, const pmf_qual_t *);
  185 static bool             mfi_resume(device_t, const pmf_qual_t *);
  186 
  187 static dev_type_open(mfifopen);
  188 static dev_type_close(mfifclose);
  189 static dev_type_ioctl(mfifioctl);
  190 const struct cdevsw mfi_cdevsw = {
  191         .d_open = mfifopen,
  192         .d_close = mfifclose,
  193         .d_read = noread,
  194         .d_write = nowrite,
  195         .d_ioctl = mfifioctl,
  196         .d_stop = nostop,
  197         .d_tty = notty,
  198         .d_poll = nopoll,
  199         .d_mmap = nommap,
  200         .d_kqfilter = nokqfilter,
  201         .d_discard = nodiscard,
  202         .d_flag = D_OTHER
  203 };
  204 
  205 static uint32_t         mfi_xscale_fw_state(struct mfi_softc *sc);
  206 static void             mfi_xscale_intr_ena(struct mfi_softc *sc);
  207 static void             mfi_xscale_intr_dis(struct mfi_softc *sc);
  208 static int              mfi_xscale_intr(struct mfi_softc *sc);
  209 static void             mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
  210 
  211 static const struct mfi_iop_ops mfi_iop_xscale = {
  212         mfi_xscale_fw_state,
  213         mfi_xscale_intr_dis,
  214         mfi_xscale_intr_ena,
  215         mfi_xscale_intr,
  216         mfi_xscale_post,
  217         mfi_scsi_ld_io,
  218 };
  219 
  220 static uint32_t         mfi_ppc_fw_state(struct mfi_softc *sc);
  221 static void             mfi_ppc_intr_ena(struct mfi_softc *sc);
  222 static void             mfi_ppc_intr_dis(struct mfi_softc *sc);
  223 static int              mfi_ppc_intr(struct mfi_softc *sc);
  224 static void             mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
  225 
  226 static const struct mfi_iop_ops mfi_iop_ppc = {
  227         mfi_ppc_fw_state,
  228         mfi_ppc_intr_dis,
  229         mfi_ppc_intr_ena,
  230         mfi_ppc_intr,
  231         mfi_ppc_post,
  232         mfi_scsi_ld_io,
  233 };
  234 
  235 uint32_t        mfi_gen2_fw_state(struct mfi_softc *sc);
  236 void            mfi_gen2_intr_ena(struct mfi_softc *sc);
  237 void            mfi_gen2_intr_dis(struct mfi_softc *sc);
  238 int             mfi_gen2_intr(struct mfi_softc *sc);
  239 void            mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);
  240 
  241 static const struct mfi_iop_ops mfi_iop_gen2 = {
  242         mfi_gen2_fw_state,
  243         mfi_gen2_intr_dis,
  244         mfi_gen2_intr_ena,
  245         mfi_gen2_intr,
  246         mfi_gen2_post,
  247         mfi_scsi_ld_io,
  248 };
  249 
  250 u_int32_t       mfi_skinny_fw_state(struct mfi_softc *);
  251 void            mfi_skinny_intr_dis(struct mfi_softc *);
  252 void            mfi_skinny_intr_ena(struct mfi_softc *);
  253 int             mfi_skinny_intr(struct mfi_softc *);
  254 void            mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
  255 
  256 static const struct mfi_iop_ops mfi_iop_skinny = {
  257         mfi_skinny_fw_state,
  258         mfi_skinny_intr_dis,
  259         mfi_skinny_intr_ena,
  260         mfi_skinny_intr,
  261         mfi_skinny_post,
  262         mfi_scsi_ld_io,
  263 };
  264 
  265 static int      mfi_tbolt_init_desc_pool(struct mfi_softc *);
  266 static int      mfi_tbolt_init_MFI_queue(struct mfi_softc *);
  267 static void     mfi_tbolt_build_mpt_ccb(struct mfi_ccb *);
  268 int             mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *,
  269                     uint64_t, uint32_t);
  270 static void     mfi_tbolt_scsi_ld_done(struct mfi_ccb *);
  271 static int      mfi_tbolt_create_sgl(struct mfi_ccb *, int);
  272 void            mfi_tbolt_sync_map_info(struct work *, void *);
  273 static void     mfi_sync_map_complete(struct mfi_ccb *);
  274 
  275 u_int32_t       mfi_tbolt_fw_state(struct mfi_softc *);
  276 void            mfi_tbolt_intr_dis(struct mfi_softc *);
  277 void            mfi_tbolt_intr_ena(struct mfi_softc *);
  278 int             mfi_tbolt_intr(struct mfi_softc *sc);
  279 void            mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *);
  280 
  281 static const struct mfi_iop_ops mfi_iop_tbolt = {
  282         mfi_tbolt_fw_state,
  283         mfi_tbolt_intr_dis,
  284         mfi_tbolt_intr_ena,
  285         mfi_tbolt_intr,
  286         mfi_tbolt_post,
  287         mfi_tbolt_scsi_ld_io,
  288 };
  289 
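       /*
        * Each controller generation (xscale, ppc, gen2, skinny, tbolt)
        * provides its own mfi_iop_ops table, selected in mfi_attach().
        * The macros below dispatch the register-level operations through
        * sc_iop so the rest of the driver stays generation-agnostic.
        */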
  290 #define mfi_fw_state(_s)        ((_s)->sc_iop->mio_fw_state(_s))
  291 #define mfi_intr_enable(_s)     ((_s)->sc_iop->mio_intr_ena(_s))
  292 #define mfi_intr_disable(_s)    ((_s)->sc_iop->mio_intr_dis(_s))
  293 #define mfi_my_intr(_s)         ((_s)->sc_iop->mio_intr(_s))
  294 #define mfi_post(_s, _c)        ((_s)->sc_iop->mio_post((_s), (_c)))
  295 
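       /*
        * ccb pool handling: free ccbs sit on sc_ccb_freeq, guarded by
        * splbio().  mfi_get_ccb() pops one and marks it READY;
        * mfi_put_ccb() clears the frame header and per-command fields
        * (preserving the tbolt SMID) and returns it to the free queue.
        */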
  296 static struct mfi_ccb *
  297 mfi_get_ccb(struct mfi_softc *sc)
  298 {
  299         struct mfi_ccb          *ccb;
  300         int                     s;
  301 
  302         s = splbio();
  303         ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
  304         if (ccb) {
  305                 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
  306                 ccb->ccb_state = MFI_CCB_READY;
  307         }
  308         splx(s);
  309 
  310         DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
  311         if (__predict_false(ccb == NULL && sc->sc_running))
  312                 aprint_error_dev(sc->sc_dev, "out of ccb\n");
  313 
  314         return ccb;
  315 }
  316 
  317 static void
  318 mfi_put_ccb(struct mfi_ccb *ccb)
  319 {
  320         struct mfi_softc        *sc = ccb->ccb_sc;
  321         struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
  322         int                     s;
  323 
  324         DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
  325 
  326         hdr->mfh_cmd_status = 0x0;
  327         hdr->mfh_flags = 0x0;
  328         ccb->ccb_state = MFI_CCB_FREE;
  329         ccb->ccb_xs = NULL;
  330         ccb->ccb_flags = 0;
  331         ccb->ccb_done = NULL;
  332         ccb->ccb_direction = 0;
  333         ccb->ccb_frame_size = 0;
  334         ccb->ccb_extra_frames = 0;
  335         ccb->ccb_sgl = NULL;
  336         ccb->ccb_data = NULL;
  337         ccb->ccb_len = 0;
  338         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
  339                 /* erase tb_request_desc but preserve SMID */
  340                 int index = ccb->ccb_tb_request_desc.header.SMID;
  341                 ccb->ccb_tb_request_desc.words = 0;
  342                 ccb->ccb_tb_request_desc.header.SMID = index;
  343         }
  344         s = splbio();
  345         TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
  346         splx(s);
  347 }
  348 
  349 static int
  350 mfi_destroy_ccb(struct mfi_softc *sc)
  351 {
  352         struct mfi_ccb          *ccb;
  353         uint32_t                i;
  354 
  355         DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));
  356 
  357 
  358         for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
   359                 /* destroy the dma map for this ccb */
  360                 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
  361         }
  362 
  363         if (i < sc->sc_max_cmds)
  364                 return EBUSY;
  365 
  366         free(sc->sc_ccb, M_DEVBUF);
  367 
  368         return 0;
  369 }
  370 
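       /*
        * Carve the preallocated frame and sense pools into per-ccb
        * slices and create a data dmamap for each ccb.  On thunderbolt
        * controllers also wire up the MPI2 request message and
        * chain-frame areas; SMIDs start at 1 because SMID 0 is reserved.
        */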
  371 static int
  372 mfi_init_ccb(struct mfi_softc *sc)
  373 {
  374         struct mfi_ccb          *ccb;
  375         uint32_t                i;
  376         int                     error;
  377         bus_addr_t              io_req_base_phys;
  378         uint8_t                 *io_req_base;
  379         int offset;
  380 
  381         DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
  382 
  383         sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
  384             M_DEVBUF, M_WAITOK|M_ZERO);
  385         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
  386                 /*
   387                  * The first 256 bytes (SMID 0) are not used.
  388                  * Don't add to the cmd list.
  389                  */
  390                 io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) +
  391                     MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
  392                 io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) +
  393                     MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
  394         } else {
  395                 io_req_base = NULL;     /* XXX: gcc */
  396                 io_req_base_phys = 0;   /* XXX: gcc */
  397         }
  398 
  399         for (i = 0; i < sc->sc_max_cmds; i++) {
  400                 ccb = &sc->sc_ccb[i];
  401 
  402                 ccb->ccb_sc = sc;
  403 
  404                 /* select i'th frame */
  405                 ccb->ccb_frame = (union mfi_frame *)
  406                     ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
  407                 ccb->ccb_pframe =
  408                     MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
  409                 ccb->ccb_frame->mfr_header.mfh_context = i;
  410 
  411                 /* select i'th sense */
  412                 ccb->ccb_sense = (struct mfi_sense *)
  413                     ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
  414                 ccb->ccb_psense =
  415                     (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
  416 
  417                 /* create a dma map for transfer */
  418                 error = bus_dmamap_create(sc->sc_datadmat,
  419                     MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
  420                     BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
  421                 if (error) {
  422                         aprint_error_dev(sc->sc_dev,
  423                             "cannot create ccb dmamap (%d)\n", error);
  424                         goto destroy;
  425                 }
  426                 if (sc->sc_ioptype == MFI_IOP_TBOLT) {
  427                         offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
  428                         ccb->ccb_tb_io_request =
  429                             (struct mfi_mpi2_request_raid_scsi_io *)
  430                             (io_req_base + offset);
  431                         ccb->ccb_tb_pio_request =
  432                             io_req_base_phys + offset;
  433                         offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i;
  434                         ccb->ccb_tb_sg_frame =
  435                             (mpi2_sge_io_union *)(sc->sc_reply_pool_limit +
  436                             offset);
  437                         ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr +
  438                             offset;
  439                         /* SMID 0 is reserved. Set SMID/index from 1 */
  440                         ccb->ccb_tb_request_desc.header.SMID = i + 1;
  441                 }
  442 
  443                 DNPRINTF(MFI_D_CCB,
  444                     "ccb(%d): %p frame: %p (%#lx) sense: %p (%#lx) map: %p\n",
  445                     ccb->ccb_frame->mfr_header.mfh_context, ccb,
  446                     ccb->ccb_frame, (u_long)ccb->ccb_pframe,
  447                     ccb->ccb_sense, (u_long)ccb->ccb_psense,
  448                     ccb->ccb_dmamap);
  449 
  450                 /* add ccb to queue */
  451                 mfi_put_ccb(ccb);
  452         }
  453 
  454         return 0;
  455 destroy:
  456         /* free dma maps and ccb memory */
  457         while (i) {
  458                 i--;
  459                 ccb = &sc->sc_ccb[i];
  460                 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap);
  461         }
  462 
  463         free(sc->sc_ccb, M_DEVBUF);
  464 
  465         return 1;
  466 }
  467 
  468 static uint32_t
  469 mfi_read(struct mfi_softc *sc, bus_size_t r)
  470 {
  471         uint32_t rv;
  472 
  473         bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
  474             BUS_SPACE_BARRIER_READ);
  475         rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
  476 
   477         DNPRINTF(MFI_D_RW, "%s: mr %#zx 0x%08x ", DEVNAME(sc), r, rv);
  478         return rv;
  479 }
  480 
  481 static void
  482 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
  483 {
  484         DNPRINTF(MFI_D_RW, "%s: mw %#zx 0x%08x", DEVNAME(sc), r, v);
  485 
  486         bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
  487         bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
  488             BUS_SPACE_BARRIER_WRITE);
  489 }
  490 
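       /*
        * mfi_allocmem()/mfi_freemem() wrap the usual bus_dma sequence
        * (dmamap_create, dmamem_alloc, dmamem_map, dmamap_load) for a
        * single zeroed, physically contiguous segment, unwinding on
        * failure.  Typical use, as in mfi_attach() below:
        *
        *      sc->sc_pcq = mfi_allocmem(sc, size);
        *      if (sc->sc_pcq == NULL)
        *              goto nopcq;
        *      ...
        *      mfi_freemem(sc, &sc->sc_pcq);
        */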
  491 static struct mfi_mem *
  492 mfi_allocmem(struct mfi_softc *sc, size_t size)
  493 {
  494         struct mfi_mem          *mm;
  495         int                     nsegs;
  496 
  497         DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %zu\n", DEVNAME(sc),
  498             size);
  499 
  500         mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_WAITOK|M_ZERO);
  501         mm->am_size = size;
  502 
  503         if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
  504             BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
  505                 goto amfree;
  506 
  507         if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
  508             &nsegs, BUS_DMA_NOWAIT) != 0)
  509                 goto destroy;
  510 
  511         if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
  512             BUS_DMA_NOWAIT) != 0)
  513                 goto free;
  514 
  515         if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
  516             BUS_DMA_NOWAIT) != 0)
  517                 goto unmap;
  518 
  519         DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %" PRIxBUSADDR "  map: %p\n",
  520             mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
  521 
  522         memset(mm->am_kva, 0, size);
  523         return mm;
  524 
  525 unmap:
  526         bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
  527 free:
  528         bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
  529 destroy:
  530         bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
  531 amfree:
  532         free(mm, M_DEVBUF);
  533 
  534         return NULL;
  535 }
  536 
  537 static void
  538 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
  539 {
  540         struct mfi_mem *mm = *mmp;
  541 
  542         if (mm == NULL)
  543                 return;
  544 
  545         *mmp = NULL;
  546 
  547         DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
  548 
  549         bus_dmamap_unload(sc->sc_dmat, mm->am_map);
  550         bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
  551         bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
  552         bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
  553         free(mm, M_DEVBUF);
  554 }
  555 
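       /*
        * Walk the firmware towards MFI_STATE_READY: for each intermediate
        * state write the matching command to the inbound doorbell
        * (MFI_SKINNY_IDB for skinny/tbolt, MFI_IDB otherwise), then poll
        * the state register in 100ms steps until it changes, giving up
        * after the per-state timeout of max_wait seconds.
        */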
  556 static int
  557 mfi_transition_firmware(struct mfi_softc *sc)
  558 {
  559         uint32_t                fw_state, cur_state;
  560         int                     max_wait, i;
  561 
  562         fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
  563 
  564         DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
  565             fw_state);
  566 
  567         while (fw_state != MFI_STATE_READY) {
  568                 DNPRINTF(MFI_D_MISC,
  569                     "%s: waiting for firmware to become ready\n",
  570                     DEVNAME(sc));
  571                 cur_state = fw_state;
  572                 switch (fw_state) {
  573                 case MFI_STATE_FAULT:
  574                         aprint_error_dev(sc->sc_dev, "firmware fault\n");
  575                         return 1;
  576                 case MFI_STATE_WAIT_HANDSHAKE:
  577                         if (sc->sc_ioptype == MFI_IOP_SKINNY ||
  578                             sc->sc_ioptype == MFI_IOP_TBOLT)
  579                                 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE);
  580                         else
  581                                 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
  582                         max_wait = 2;
  583                         break;
  584                 case MFI_STATE_OPERATIONAL:
  585                         if (sc->sc_ioptype == MFI_IOP_SKINNY ||
  586                             sc->sc_ioptype == MFI_IOP_TBOLT)
  587                                 mfi_write(sc, MFI_SKINNY_IDB, MFI_RESET_FLAGS);
  588                         else
  589                                 mfi_write(sc, MFI_IDB, MFI_INIT_READY);
  590                         max_wait = 10;
  591                         break;
  592                 case MFI_STATE_UNDEFINED:
  593                 case MFI_STATE_BB_INIT:
  594                         max_wait = 2;
  595                         break;
  596                 case MFI_STATE_FW_INIT:
  597                 case MFI_STATE_DEVICE_SCAN:
  598                 case MFI_STATE_FLUSH_CACHE:
  599                         max_wait = 20;
  600                         break;
  601                 case MFI_STATE_BOOT_MESSAGE_PENDING:
  602                         if (sc->sc_ioptype == MFI_IOP_SKINNY ||
  603                             sc->sc_ioptype == MFI_IOP_TBOLT) {
  604                                 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG);
  605                         } else {
  606                                 mfi_write(sc, MFI_IDB, MFI_INIT_HOTPLUG);
  607                         }
  608                         max_wait = 180;
  609                         break;
  610                 default:
  611                         aprint_error_dev(sc->sc_dev,
  612                             "unknown firmware state %d\n", fw_state);
  613                         return 1;
  614                 }
  615                 for (i = 0; i < (max_wait * 10); i++) {
  616                         fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
  617                         if (fw_state == cur_state)
  618                                 DELAY(100000);
  619                         else
  620                                 break;
  621                 }
  622                 if (fw_state == cur_state) {
  623                         aprint_error_dev(sc->sc_dev,
  624                             "firmware stuck in state %#x\n", fw_state);
  625                         return 1;
  626                 }
  627         }
  628 
  629         return 0;
  630 }
  631 
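       /*
        * Legacy (non-tbolt) firmware initialization: build an
        * MFI_CMD_INIT frame whose queue-info block points the firmware
        * at the reply queue and the producer/consumer indices in sc_pcq,
        * then issue it synchronously with mfi_poll().
        */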
  632 static int
  633 mfi_initialize_firmware(struct mfi_softc *sc)
  634 {
  635         struct mfi_ccb          *ccb;
  636         struct mfi_init_frame   *init;
  637         struct mfi_init_qinfo   *qinfo;
  638 
  639         DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
  640 
  641         if ((ccb = mfi_get_ccb(sc)) == NULL)
  642                 return 1;
  643 
  644         init = &ccb->ccb_frame->mfr_init;
  645         qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
  646 
  647         memset(qinfo, 0, sizeof *qinfo);
  648         qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
  649         qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
  650             offsetof(struct mfi_prod_cons, mpc_reply_q));
  651         qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
  652             offsetof(struct mfi_prod_cons, mpc_producer));
  653         qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
  654             offsetof(struct mfi_prod_cons, mpc_consumer));
  655 
  656         init->mif_header.mfh_cmd = MFI_CMD_INIT;
  657         init->mif_header.mfh_data_len = sizeof *qinfo;
  658         init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);
  659 
  660         DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
  661             DEVNAME(sc),
  662             qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
  663             qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);
  664 
  665         if (mfi_poll(ccb)) {
  666                 aprint_error_dev(sc->sc_dev,
  667                     "mfi_initialize_firmware failed\n");
  668                 return 1;
  669         }
  670 
  671         mfi_put_ccb(ccb);
  672 
  673         return 0;
  674 }
  675 
  676 static int
  677 mfi_get_info(struct mfi_softc *sc)
  678 {
  679 #ifdef MFI_DEBUG
  680         int i;
  681 #endif
  682         DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
  683 
  684         if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
  685             sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false))
  686                 return 1;
  687 
  688 #ifdef MFI_DEBUG
  689         for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
  690                 printf("%s: active FW %s Version %s date %s time %s\n",
  691                     DEVNAME(sc),
  692                     sc->sc_info.mci_image_component[i].mic_name,
  693                     sc->sc_info.mci_image_component[i].mic_version,
  694                     sc->sc_info.mci_image_component[i].mic_build_date,
  695                     sc->sc_info.mci_image_component[i].mic_build_time);
  696         }
  697 
  698         for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
  699                 printf("%s: pending FW %s Version %s date %s time %s\n",
  700                     DEVNAME(sc),
  701                     sc->sc_info.mci_pending_image_component[i].mic_name,
  702                     sc->sc_info.mci_pending_image_component[i].mic_version,
  703                     sc->sc_info.mci_pending_image_component[i].mic_build_date,
  704                     sc->sc_info.mci_pending_image_component[i].mic_build_time);
  705         }
  706 
  707         printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
  708             DEVNAME(sc),
  709             sc->sc_info.mci_max_arms,
  710             sc->sc_info.mci_max_spans,
  711             sc->sc_info.mci_max_arrays,
  712             sc->sc_info.mci_max_lds,
  713             sc->sc_info.mci_product_name);
  714 
  715         printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
  716             DEVNAME(sc),
  717             sc->sc_info.mci_serial_number,
  718             sc->sc_info.mci_hw_present,
  719             sc->sc_info.mci_current_fw_time,
  720             sc->sc_info.mci_max_cmds,
  721             sc->sc_info.mci_max_sg_elements);
  722 
  723         printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
  724             DEVNAME(sc),
  725             sc->sc_info.mci_max_request_size,
  726             sc->sc_info.mci_lds_present,
  727             sc->sc_info.mci_lds_degraded,
  728             sc->sc_info.mci_lds_offline,
  729             sc->sc_info.mci_pd_present);
  730 
  731         printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
  732             DEVNAME(sc),
  733             sc->sc_info.mci_pd_disks_present,
  734             sc->sc_info.mci_pd_disks_pred_failure,
  735             sc->sc_info.mci_pd_disks_failed);
  736 
  737         printf("%s: nvram %d mem %d flash %d\n",
  738             DEVNAME(sc),
  739             sc->sc_info.mci_nvram_size,
  740             sc->sc_info.mci_memory_size,
  741             sc->sc_info.mci_flash_size);
  742 
  743         printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
  744             DEVNAME(sc),
  745             sc->sc_info.mci_ram_correctable_errors,
  746             sc->sc_info.mci_ram_uncorrectable_errors,
  747             sc->sc_info.mci_cluster_allowed,
  748             sc->sc_info.mci_cluster_active);
  749 
  750         printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
  751             DEVNAME(sc),
  752             sc->sc_info.mci_max_strips_per_io,
  753             sc->sc_info.mci_raid_levels,
  754             sc->sc_info.mci_adapter_ops,
  755             sc->sc_info.mci_ld_ops);
  756 
  757         printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
  758             DEVNAME(sc),
  759             sc->sc_info.mci_stripe_sz_ops.min,
  760             sc->sc_info.mci_stripe_sz_ops.max,
  761             sc->sc_info.mci_pd_ops,
  762             sc->sc_info.mci_pd_mix_support);
  763 
  764         printf("%s: ecc_bucket %d pckg_prop %s\n",
  765             DEVNAME(sc),
  766             sc->sc_info.mci_ecc_bucket_count,
  767             sc->sc_info.mci_package_version);
  768 
  769         printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
  770             DEVNAME(sc),
  771             sc->sc_info.mci_properties.mcp_seq_num,
  772             sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
  773             sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
  774             sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
  775 
  776         printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
  777             DEVNAME(sc),
  778             sc->sc_info.mci_properties.mcp_rebuild_rate,
  779             sc->sc_info.mci_properties.mcp_patrol_read_rate,
  780             sc->sc_info.mci_properties.mcp_bgi_rate,
  781             sc->sc_info.mci_properties.mcp_cc_rate);
  782 
  783         printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
  784             DEVNAME(sc),
  785             sc->sc_info.mci_properties.mcp_recon_rate,
  786             sc->sc_info.mci_properties.mcp_cache_flush_interval,
  787             sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
  788             sc->sc_info.mci_properties.mcp_spinup_delay,
  789             sc->sc_info.mci_properties.mcp_cluster_enable);
  790 
  791         printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
  792             DEVNAME(sc),
  793             sc->sc_info.mci_properties.mcp_coercion_mode,
  794             sc->sc_info.mci_properties.mcp_alarm_enable,
  795             sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
  796             sc->sc_info.mci_properties.mcp_disable_battery_warn,
  797             sc->sc_info.mci_properties.mcp_ecc_bucket_size);
  798 
  799         printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
  800             DEVNAME(sc),
  801             sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
  802             sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
  803             sc->sc_info.mci_properties.mcp_expose_encl_devices);
  804 
  805         printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
  806             DEVNAME(sc),
  807             sc->sc_info.mci_pci.mip_vendor,
  808             sc->sc_info.mci_pci.mip_device,
  809             sc->sc_info.mci_pci.mip_subvendor,
  810             sc->sc_info.mci_pci.mip_subdevice);
  811 
  812         printf("%s: type %#x port_count %d port_addr ",
  813             DEVNAME(sc),
  814             sc->sc_info.mci_host.mih_type,
  815             sc->sc_info.mci_host.mih_port_count);
  816 
  817         for (i = 0; i < 8; i++)
  818                 printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]);
  819         printf("\n");
  820 
  821         printf("%s: type %.x port_count %d port_addr ",
  822             DEVNAME(sc),
  823             sc->sc_info.mci_device.mid_type,
  824             sc->sc_info.mci_device.mid_port_count);
  825 
  826         for (i = 0; i < 8; i++) {
  827                 printf("%.0" PRIx64 " ",
  828                     sc->sc_info.mci_device.mid_port_addr[i]);
  829         }
  830         printf("\n");
  831 #endif /* MFI_DEBUG */
  832 
  833         return 0;
  834 }
  835 
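       /*
        * Fetch the battery backup unit status via MR_DCMD_BBU_GET_STATUS
        * and condense it into MFI_BBU_GOOD/BAD/UNKNOWN for callers such
        * as mfi_attach().
        */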
  836 static int
  837 mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat)
  838 {
  839         DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc));
  840 
  841         if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
  842             sizeof(*stat), stat, NULL, cold ? true : false))
  843                 return MFI_BBU_UNKNOWN;
  844 #ifdef MFI_DEBUG
  845         printf("bbu type %d, voltage %d, current %d, temperature %d, "
  846             "status 0x%x\n", stat->battery_type, stat->voltage, stat->current,
  847             stat->temperature, stat->fw_status);
  848         printf("details: ");
  849         switch (stat->battery_type) {
  850         case MFI_BBU_TYPE_IBBU:
   851                 printf("gauge %d relative charge %d charger state %d "
   852                     "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status,
   853                     stat->detail.ibbu.relative_charge,
   854                     stat->detail.ibbu.charger_system_state,
   855                     stat->detail.ibbu.charger_system_ctrl);
   856                 printf("\tcurrent %d abs charge %d max error %d\n",
   857                     stat->detail.ibbu.charging_current,
   858                     stat->detail.ibbu.absolute_charge,
   859                     stat->detail.ibbu.max_error);
   860                 break;
   861         case MFI_BBU_TYPE_BBU:
   862                 printf("gauge %d relative charge %d charger state %d\n",
   863                     stat->detail.ibbu.gas_guage_status,
   864                     stat->detail.bbu.relative_charge,
   865                     stat->detail.bbu.charger_status);
   866                 printf("\trem capacity %d full capacity %d SOH %d\n",
   867                     stat->detail.bbu.remaining_capacity,
   868                     stat->detail.bbu.full_charge_capacity,
   869                     stat->detail.bbu.is_SOH_good);
  870                 break;
  871         default:
  872                 printf("\n");
  873         }
  874 #endif
  875         switch (stat->battery_type) {
  876         case MFI_BBU_TYPE_BBU:
  877                 return (stat->detail.bbu.is_SOH_good ?
  878                     MFI_BBU_GOOD : MFI_BBU_BAD);
  879         case MFI_BBU_TYPE_NONE:
  880                 return MFI_BBU_UNKNOWN;
  881         default:
  882                 if (stat->fw_status &
  883                     (MFI_BBU_STATE_PACK_MISSING |
  884                      MFI_BBU_STATE_VOLTAGE_LOW |
  885                      MFI_BBU_STATE_TEMPERATURE_HIGH |
  886                      MFI_BBU_STATE_LEARN_CYC_FAIL |
  887                      MFI_BBU_STATE_LEARN_CYC_TIMEOUT |
  888                      MFI_BBU_STATE_I2C_ERR_DETECT))
  889                         return MFI_BBU_BAD;
  890                 return MFI_BBU_GOOD;
  891         }
  892 }
  893 
  894 static void
  895 mfiminphys(struct buf *bp)
  896 {
  897         DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
  898 
  899         /* XXX currently using MFI_MAXFER = MAXPHYS */
  900         if (bp->b_bcount > MFI_MAXFER)
  901                 bp->b_bcount = MFI_MAXFER;
  902         minphys(bp);
  903 }
  904 
  905 int
  906 mfi_rescan(device_t self, const char *ifattr, const int *locators)
  907 {
  908         struct mfi_softc *sc = device_private(self);
  909 
  910         if (sc->sc_child != NULL)
  911                 return 0;
  912 
  913         sc->sc_child = config_found(self, &sc->sc_chan, scsiprint, CFARGS_NONE);
  914 
  915         return 0;
  916 }
  917 
  918 void
  919 mfi_childdetached(device_t self, device_t child)
  920 {
  921         struct mfi_softc *sc = device_private(self);
  922 
  923         KASSERT(self == sc->sc_dev);
  924         KASSERT(child == sc->sc_child);
  925 
  926         if (child == sc->sc_child)
  927                 sc->sc_child = NULL;
  928 }
  929 
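       /*
        * Detach order: detach the scsipi child first, then unregister
        * bio and sensors, disable interrupts, flush and shut down the
        * firmware, release the tbolt-specific resources if present,
        * destroy the ccbs and free the sense, frame and reply-queue DMA
        * memory.
        */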
  930 int
  931 mfi_detach(struct mfi_softc *sc, int flags)
  932 {
  933         int                     error;
  934 
  935         DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));
  936 
  937         if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
  938                 return error;
  939 
  940 #if NBIO > 0
  941         mfi_destroy_sensors(sc);
  942         bio_unregister(sc->sc_dev);
  943 #endif /* NBIO > 0 */
  944 
  945         mfi_intr_disable(sc);
  946         mfi_shutdown(sc->sc_dev, 0);
  947 
  948         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
  949                 workqueue_destroy(sc->sc_ldsync_wq);
  950                 mfi_put_ccb(sc->sc_ldsync_ccb);
  951                 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
  952                 mfi_freemem(sc, &sc->sc_tbolt_ioc_init);
  953                 mfi_freemem(sc, &sc->sc_tbolt_verbuf);
  954         }
  955 
  956         if ((error = mfi_destroy_ccb(sc)) != 0)
  957                 return error;
  958 
  959         mfi_freemem(sc, &sc->sc_sense);
  960 
  961         mfi_freemem(sc, &sc->sc_frames);
  962 
  963         mfi_freemem(sc, &sc->sc_pcq);
  964 
  965         return 0;
  966 }
  967 
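       /*
        * PMF shutdown hook (also called from mfi_detach()): at splbio,
        * flush the controller and disk caches and issue
        * MR_DCMD_CTRL_SHUTDOWN, clearing sc_running on success.
        */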
  968 static bool
  969 mfi_shutdown(device_t dev, int how)
  970 {
  971         struct mfi_softc        *sc = device_private(dev);
  972         union mfi_mbox          mbox;
  973         int s = splbio();
  974 
  975         DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc));
  976         if (sc->sc_running) {
  977                 memset(&mbox, 0, sizeof(mbox));
  978                 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
  979                 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH,
  980                     MFI_DATA_NONE, 0, NULL, &mbox, true)) {
  981                         aprint_error_dev(dev, "shutdown: cache flush failed\n");
  982                         goto fail;
  983                 }
  984 
  985                 mbox.b[0] = 0;
  986                 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN,
  987                     MFI_DATA_NONE, 0, NULL, &mbox, true)) {
  988                         aprint_error_dev(dev, "shutdown: "
  989                             "firmware shutdown failed\n");
  990                         goto fail;
  991                 }
  992                 sc->sc_running = false;
  993         }
  994         splx(s);
  995         return true;
  996 fail:
  997         splx(s);
  998         return false;
  999 }
 1000 
 1001 static bool
 1002 mfi_suspend(device_t dev, const pmf_qual_t *q)
 1003 {
 1004         /* XXX to be implemented */
 1005         return false;
 1006 }
 1007 
 1008 static bool
 1009 mfi_resume(device_t dev, const pmf_qual_t *q)
 1010 {
 1011         /* XXX to be implemented */
 1012         return false;
 1013 }
 1014 
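       /*
        * Attach sequence: bring the firmware to READY, size the ccb and
        * SG lists from the firmware state register, allocate the DMA
        * pools (plus the thunderbolt request/reply/IOC-init buffers when
        * needed), initialize the ccbs and the firmware queues, fetch the
        * controller and BBU information, then attach the scsipi channel
        * and register the bio, sensor and PMF hooks.
        */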
 1015 int
 1016 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
 1017 {
 1018         struct scsipi_adapter *adapt = &sc->sc_adapt;
 1019         struct scsipi_channel *chan = &sc->sc_chan;
 1020         uint32_t                status, frames, max_sgl;
 1021         int                     i;
 1022 
 1023         DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
 1024 
 1025         sc->sc_ioptype = iop;
 1026 
 1027         switch (iop) {
 1028         case MFI_IOP_XSCALE:
 1029                 sc->sc_iop = &mfi_iop_xscale;
 1030                 break;
 1031         case MFI_IOP_PPC:
 1032                 sc->sc_iop = &mfi_iop_ppc;
 1033                 break;
 1034         case MFI_IOP_GEN2:
 1035                 sc->sc_iop = &mfi_iop_gen2;
 1036                 break;
 1037         case MFI_IOP_SKINNY:
 1038                 sc->sc_iop = &mfi_iop_skinny;
 1039                 break;
 1040         case MFI_IOP_TBOLT:
 1041                 sc->sc_iop = &mfi_iop_tbolt;
 1042                 break;
 1043         default:
 1044                 panic("%s: unknown iop %d", DEVNAME(sc), iop);
 1045         }
 1046 
 1047         if (mfi_transition_firmware(sc))
 1048                 return 1;
 1049 
 1050         TAILQ_INIT(&sc->sc_ccb_freeq);
 1051 
 1052         status = mfi_fw_state(sc);
 1053         sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
 1054         max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
 1055         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
 1056                 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
 1057                 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
 1058         } else if (sc->sc_64bit_dma) {
 1059                 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
 1060                 sc->sc_sgl_size = sizeof(struct mfi_sg64);
 1061         } else {
 1062                 sc->sc_max_sgl = max_sgl;
 1063                 sc->sc_sgl_size = sizeof(struct mfi_sg32);
 1064         }
 1065         if (sc->sc_ioptype == MFI_IOP_SKINNY)
 1066                 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee);
 1067         DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
 1068             DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);
 1069 
 1070         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
 1071                 uint32_t tb_mem_size;
 1072                 /* for Alignment */
 1073                 tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALIGNMENT;
 1074 
 1075                 tb_mem_size +=
 1076                     MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1);
 1077                 sc->sc_reply_pool_size =
 1078                     ((sc->sc_max_cmds + 1 + 15) / 16) * 16;
 1079                 tb_mem_size +=
 1080                     MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
 1081 
 1082                 /* this is for SGL's */
 1083                 tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds;
 1084                 sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size);
 1085                 if (sc->sc_tbolt_reqmsgpool == NULL) {
 1086                         aprint_error_dev(sc->sc_dev,
 1087                             "unable to allocate thunderbolt "
 1088                             "request message pool\n");
 1089                         goto nopcq;
 1090                 }
 1091                 if (mfi_tbolt_init_desc_pool(sc)) {
 1092                         aprint_error_dev(sc->sc_dev,
 1093                             "Thunderbolt pool preparation error\n");
 1094                         goto nopcq;
 1095                 }
 1096 
 1097                 /*
  1098                  * Allocate a DMA memory mapping for the MPI2 IOC Init
  1099                  * descriptor; keep it separate from the request and reply
  1100                  * descriptor pools to avoid confusion later.
 1101                  */
 1102                 sc->sc_tbolt_ioc_init = mfi_allocmem(sc,
 1103                     sizeof(struct mpi2_ioc_init_request));
 1104                 if (sc->sc_tbolt_ioc_init == NULL) {
 1105                         aprint_error_dev(sc->sc_dev,
  1106                             "unable to allocate thunderbolt IOC init memory\n");
 1107                         goto nopcq;
 1108                 }
 1109 
 1110                 sc->sc_tbolt_verbuf = mfi_allocmem(sc,
 1111                     MEGASAS_MAX_NAME*sizeof(bus_addr_t));
 1112                 if (sc->sc_tbolt_verbuf == NULL) {
 1113                         aprint_error_dev(sc->sc_dev,
 1114                             "unable to allocate thunderbolt version buffer\n");
 1115                         goto nopcq;
 1116                 }
 1117 
 1118         }
 1119         /* consumer/producer and reply queue memory */
 1120         sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
 1121             sizeof(struct mfi_prod_cons));
 1122         if (sc->sc_pcq == NULL) {
 1123                 aprint_error_dev(sc->sc_dev,
 1124                     "unable to allocate reply queue memory\n");
 1125                 goto nopcq;
 1126         }
 1127         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
 1128             sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
 1129             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1130 
 1131         /* frame memory */
 1132         frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
 1133             MFI_FRAME_SIZE + 1;
 1134         sc->sc_frames_size = frames * MFI_FRAME_SIZE;
 1135         sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
 1136         if (sc->sc_frames == NULL) {
 1137                 aprint_error_dev(sc->sc_dev,
 1138                     "unable to allocate frame memory\n");
 1139                 goto noframe;
 1140         }
 1141         /* XXX hack, fix this */
 1142         if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
 1143                 aprint_error_dev(sc->sc_dev,
 1144                     "improper frame alignment (%#llx) FIXME\n",
 1145                     (long long int)MFIMEM_DVA(sc->sc_frames));
 1146                 goto noframe;
 1147         }
 1148 
 1149         /* sense memory */
 1150         sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
 1151         if (sc->sc_sense == NULL) {
 1152                 aprint_error_dev(sc->sc_dev,
 1153                     "unable to allocate sense memory\n");
 1154                 goto nosense;
 1155         }
 1156 
 1157         /* now that we have all memory bits go initialize ccbs */
 1158         if (mfi_init_ccb(sc)) {
 1159                 aprint_error_dev(sc->sc_dev, "could not init ccb list\n");
 1160                 goto noinit;
 1161         }
 1162 
 1163         /* kickstart firmware with all addresses and pointers */
 1164         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
 1165                 if (mfi_tbolt_init_MFI_queue(sc)) {
 1166                         aprint_error_dev(sc->sc_dev,
 1167                             "could not initialize firmware\n");
 1168                         goto noinit;
 1169                 }
 1170         } else {
 1171                 if (mfi_initialize_firmware(sc)) {
 1172                         aprint_error_dev(sc->sc_dev,
 1173                             "could not initialize firmware\n");
 1174                         goto noinit;
 1175                 }
 1176         }
 1177         sc->sc_running = true;
 1178 
 1179         if (mfi_get_info(sc)) {
 1180                 aprint_error_dev(sc->sc_dev,
 1181                     "could not retrieve controller information\n");
 1182                 goto noinit;
 1183         }
 1184         aprint_normal_dev(sc->sc_dev,
 1185             "%s version %s\n",
 1186             sc->sc_info.mci_product_name,
 1187             sc->sc_info.mci_package_version);
 1188 
 1189 
 1190         aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ",
 1191             sc->sc_info.mci_lds_present,
 1192             sc->sc_info.mci_memory_size);
 1193         sc->sc_bbuok = false;
 1194         if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) {
 1195                 struct mfi_bbu_status   bbu_stat;
 1196                 int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat);
 1197                 aprint_normal("BBU type ");
 1198                 switch (bbu_stat.battery_type) {
 1199                 case MFI_BBU_TYPE_BBU:
 1200                         aprint_normal("BBU");
 1201                         break;
 1202                 case MFI_BBU_TYPE_IBBU:
 1203                         aprint_normal("IBBU");
 1204                         break;
 1205                 default:
 1206                         aprint_normal("unknown type %d", bbu_stat.battery_type);
 1207                 }
 1208                 aprint_normal(", status ");
 1209                 switch (mfi_bbu_status) {
 1210                 case MFI_BBU_GOOD:
 1211                         aprint_normal("good\n");
 1212                         sc->sc_bbuok = true;
 1213                         break;
 1214                 case MFI_BBU_BAD:
 1215                         aprint_normal("bad\n");
 1216                         break;
 1217                 case MFI_BBU_UNKNOWN:
 1218                         aprint_normal("unknown\n");
 1219                         break;
 1220                 default:
 1221                         panic("mfi_bbu_status");
 1222                 }
 1223         } else {
 1224                 aprint_normal("BBU not present\n");
 1225         }
 1226 
 1227         sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
 1228         sc->sc_max_ld = sc->sc_ld_cnt;
 1229         for (i = 0; i < sc->sc_ld_cnt; i++)
 1230                 sc->sc_ld[i].ld_present = 1;
 1231 
 1232         memset(adapt, 0, sizeof(*adapt));
 1233         adapt->adapt_dev = sc->sc_dev;
 1234         adapt->adapt_nchannels = 1;
 1235         /* keep a few commands for management */
 1236         if (sc->sc_max_cmds > 4)
 1237                 adapt->adapt_openings = sc->sc_max_cmds - 4;
 1238         else
 1239                 adapt->adapt_openings = sc->sc_max_cmds;
 1240         adapt->adapt_max_periph = adapt->adapt_openings;
 1241         adapt->adapt_request = mfi_scsipi_request;
 1242         adapt->adapt_minphys = mfiminphys;
 1243 
 1244         memset(chan, 0, sizeof(*chan));
 1245         chan->chan_adapter = adapt;
 1246         chan->chan_bustype = &scsi_sas_bustype;
 1247         chan->chan_channel = 0;
 1248         chan->chan_flags = 0;
 1249         chan->chan_nluns = 8;
 1250         chan->chan_ntargets = MFI_MAX_LD;
 1251         chan->chan_id = MFI_MAX_LD;
 1252 
 1253         mfi_rescan(sc->sc_dev, NULL, NULL);
 1254 
 1255         /* enable interrupts */
 1256         mfi_intr_enable(sc);
 1257 
 1258 #if NBIO > 0
 1259         if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
 1260                 panic("%s: controller registration failed", DEVNAME(sc));
 1261         if (mfi_create_sensors(sc) != 0)
 1262                 aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
 1263 #endif /* NBIO > 0 */
 1264         if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume,
 1265             mfi_shutdown)) {
 1266                 aprint_error_dev(sc->sc_dev,
 1267                     "couldn't establish power handler\n");
 1268         }
 1269 
 1270         return 0;
 1271 noinit:
 1272         mfi_freemem(sc, &sc->sc_sense);
 1273 nosense:
 1274         mfi_freemem(sc, &sc->sc_frames);
 1275 noframe:
 1276         mfi_freemem(sc, &sc->sc_pcq);
 1277 nopcq:
 1278         if (sc->sc_ioptype == MFI_IOP_TBOLT) {
 1279                 if (sc->sc_tbolt_reqmsgpool)
 1280                         mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool);
 1281                 if (sc->sc_tbolt_verbuf)
 1282                         mfi_freemem(sc, &sc->sc_tbolt_verbuf);
 1283         }
 1284         return 1;
 1285 }
 1286 
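       /*
        * Synchronous command execution.  On the thunderbolt MFA path the
        * status is only delivered through the reply queue, so completion
        * is detected by calling mfi_tbolt_intrh() until ccb_state becomes
        * MFI_CCB_DONE; on the legacy path the frame's mfh_cmd_status is
        * polled directly.  Either way the busy-wait gives up after about
        * five seconds.
        */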
 1287 static int
 1288 mfi_poll(struct mfi_ccb *ccb)
 1289 {
 1290         struct mfi_softc *sc = ccb->ccb_sc;
 1291         struct mfi_frame_header *hdr;
 1292         int                     to = 0;
 1293         int                     rv = 0;
 1294 
 1295         DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
 1296 
 1297         hdr = &ccb->ccb_frame->mfr_header;
 1298         hdr->mfh_cmd_status = 0xff;
 1299         if (!sc->sc_MFA_enabled)
 1300                 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 1301 
 1302         /* no callback, caller is supposed to do the cleanup */
 1303         ccb->ccb_done = NULL;
 1304 
 1305         mfi_post(sc, ccb);
 1306         if (sc->sc_MFA_enabled) {
 1307                 /*
 1308                  * Depending on the command type, the result may or may
 1309                  * not be posted to *hdr.  In addition there seems to be
 1310                  * no way to avoid posting the SMID to the reply queue,
 1311                  * so poll using the interrupt routine.
 1312                  */
 1313                 while (ccb->ccb_state != MFI_CCB_DONE) {
 1314                         delay(1000);
 1315                         if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
 1316                                 rv = 1;
 1317                                 break;
 1318                         }
 1319                         mfi_tbolt_intrh(sc);
 1320                 }
 1321         } else {
 1322                 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
 1323                     ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
 1324                     sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
 1325 
 1326                 while (hdr->mfh_cmd_status == 0xff) {
 1327                         delay(1000);
 1328                         if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
 1329                                 rv = 1;
 1330                                 break;
 1331                         }
 1332                         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
 1333                             ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
 1334                             sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
 1335                 }
 1336         }
 1337         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
 1338             ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
 1339             sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1340 
 1341         if (ccb->ccb_data != NULL) {
 1342                 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
 1343                     DEVNAME(sc));
 1344                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 1345                     ccb->ccb_dmamap->dm_mapsize,
 1346                     (ccb->ccb_direction & MFI_DATA_IN) ?
 1347                     BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
 1348 
 1349                 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
 1350         }
 1351 
 1352         if (rv != 0) {
 1353                 aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
 1354                     hdr->mfh_context);
 1355                 ccb->ccb_flags |= MFI_CCB_F_ERR;
 1356                 return 1;
 1357         }
 1358 
 1359         return 0;
 1360 }
 1361 
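      /*
       * mfi_intr: interrupt handler for the frame reply queue.  Walks the
       * producer/consumer queue, runs the completion callback of every
       * finished ccb and writes the updated consumer index back for the
       * firmware.
       */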
 1362 int
 1363 mfi_intr(void *arg)
 1364 {
 1365         struct mfi_softc        *sc = arg;
 1366         struct mfi_prod_cons    *pcq;
 1367         struct mfi_ccb          *ccb;
 1368         uint32_t                producer, consumer, ctx;
 1369         int                     claimed = 0;
 1370 
 1371         if (!mfi_my_intr(sc))
 1372                 return 0;
 1373 
 1374         pcq = MFIMEM_KVA(sc->sc_pcq);
 1375 
 1376         DNPRINTF(MFI_D_INTR, "%s: mfi_intr %p %p\n", DEVNAME(sc), sc, pcq);
 1377 
 1378         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
 1379             sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
 1380             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1381 
 1382         producer = pcq->mpc_producer;
 1383         consumer = pcq->mpc_consumer;
 1384 
 1385         while (consumer != producer) {
 1386                 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
 1387                     DEVNAME(sc), producer, consumer);
 1388 
 1389                 ctx = pcq->mpc_reply_q[consumer];
 1390                 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
 1391                 if (ctx == MFI_INVALID_CTX)
 1392                         aprint_error_dev(sc->sc_dev,
 1393                             "invalid context, p: %d c: %d\n",
 1394                             producer, consumer);
 1395                 else {
 1396                         /* XXX remove from queue and call scsi_done */
 1397                         ccb = &sc->sc_ccb[ctx];
 1398                         DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
 1399                             DEVNAME(sc), ctx);
 1400                         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
 1401                             ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
 1402                             sc->sc_frames_size,
 1403                             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1404                         ccb->ccb_done(ccb);
 1405 
 1406                         claimed = 1;
 1407                 }
 1408                 consumer++;
 1409                 if (consumer == (sc->sc_max_cmds + 1))
 1410                         consumer = 0;
 1411         }
 1412 
 1413         pcq->mpc_consumer = consumer;
 1414         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
 1415             sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
 1416             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1417 
 1418         return claimed;
 1419 }
 1420 
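      /*
       * mfi_scsi_ld_io: build an MFI_CMD_LD_READ/MFI_CMD_LD_WRITE frame for
       * a logical-disk transfer of 'blockcnt' blocks at 'blockno' and attach
       * the scatter/gather list for the xfer's data buffer.
       */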
 1421 static int
 1422 mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
 1423     uint32_t blockcnt)
 1424 {
 1425         struct scsipi_periph *periph = xs->xs_periph;
 1426         struct mfi_io_frame   *io;
 1427 
 1428         DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
 1429             device_xname(periph->periph_channel->chan_adapter->adapt_dev),
 1430             periph->periph_target);
 1431 
 1432         if (!xs->data)
 1433                 return 1;
 1434 
 1435         io = &ccb->ccb_frame->mfr_io;
 1436         if (xs->xs_control & XS_CTL_DATA_IN) {
 1437                 io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
 1438                 ccb->ccb_direction = MFI_DATA_IN;
 1439         } else {
 1440                 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
 1441                 ccb->ccb_direction = MFI_DATA_OUT;
 1442         }
 1443         io->mif_header.mfh_target_id = periph->periph_target;
 1444         io->mif_header.mfh_timeout = 0;
 1445         io->mif_header.mfh_flags = 0;
 1446         io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
 1447         io->mif_header.mfh_data_len = blockcnt;
 1448         io->mif_lba_hi = (blockno >> 32);
 1449         io->mif_lba_lo = (blockno & 0xffffffff);
 1450         io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
 1451         io->mif_sense_addr_hi = 0;
 1452 
 1453         ccb->ccb_done = mfi_scsi_ld_done;
 1454         ccb->ccb_xs = xs;
 1455         ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
 1456         ccb->ccb_sgl = &io->mif_sgl;
 1457         ccb->ccb_data = xs->data;
 1458         ccb->ccb_len = xs->datalen;
 1459 
 1460         if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
 1461             BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
 1462                 return 1;
 1463 
 1464         return 0;
 1465 }
 1466 
 1467 static void
 1468 mfi_scsi_ld_done(struct mfi_ccb *ccb)
 1469 {
 1470         struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
 1471         mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
 1472 }
 1473 
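      /*
       * mfi_scsi_xs_done: common completion for logical-disk transfers.
       * Syncs and unloads the data DMA map, translates the MFI and SCSI
       * status into scsipi error codes (copying sense data on a check
       * condition), releases the ccb and calls scsipi_done().
       */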
 1474 static void
 1475 mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
 1476 {
 1477         struct scsipi_xfer      *xs = ccb->ccb_xs;
 1478         struct mfi_softc        *sc = ccb->ccb_sc;
 1479 
 1480         DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %p %p\n",
 1481             DEVNAME(sc), ccb, ccb->ccb_frame);
 1482 
 1483         if (xs->data != NULL) {
 1484                 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
 1485                     DEVNAME(sc));
 1486                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 1487                     ccb->ccb_dmamap->dm_mapsize,
 1488                     (xs->xs_control & XS_CTL_DATA_IN) ?
 1489                     BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
 1490 
 1491                 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
 1492         }
 1493 
 1494         if (status != MFI_STAT_OK) {
 1495                 xs->error = XS_DRIVER_STUFFUP;
 1496                 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
 1497                     DEVNAME(sc), status);
 1498 
 1499                 if (scsi_status != 0) {
 1500                         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
 1501                             ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
 1502                             MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
 1503                         DNPRINTF(MFI_D_INTR,
 1504                             "%s: mfi_scsi_xs_done sense %#x %p %p\n",
 1505                             DEVNAME(sc), scsi_status,
 1506                             &xs->sense, ccb->ccb_sense);
 1507                         memset(&xs->sense, 0, sizeof(xs->sense));
 1508                         memcpy(&xs->sense, ccb->ccb_sense,
 1509                             sizeof(struct scsi_sense_data));
 1510                         xs->error = XS_SENSE;
 1511                 }
 1512         } else {
 1513                 xs->error = XS_NOERROR;
 1514                 xs->status = SCSI_OK;
 1515                 xs->resid = 0;
 1516         }
 1517 
 1518         mfi_put_ccb(ccb);
 1519         scsipi_done(xs);
 1520 }
 1521 
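      /*
       * mfi_scsi_ld: wrap an arbitrary SCSI CDB in an MFI_CMD_LD_SCSI_IO
       * pass-through frame so the firmware executes it against the
       * logical disk.
       */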
 1522 static int
 1523 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
 1524 {
 1525         struct mfi_pass_frame   *pf;
 1526         struct scsipi_periph *periph = xs->xs_periph;
 1527 
 1528         DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
 1529             device_xname(periph->periph_channel->chan_adapter->adapt_dev),
 1530             periph->periph_target);
 1531 
 1532         pf = &ccb->ccb_frame->mfr_pass;
 1533         pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
 1534         pf->mpf_header.mfh_target_id = periph->periph_target;
 1535         pf->mpf_header.mfh_lun_id = 0;
 1536         pf->mpf_header.mfh_cdb_len = xs->cmdlen;
 1537         pf->mpf_header.mfh_timeout = 0;
 1538         pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
 1539         pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
 1540 
 1541         pf->mpf_sense_addr_hi = 0;
 1542         pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);
 1543 
 1544         memset(pf->mpf_cdb, 0, 16);
 1545         memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);
 1546 
 1547         ccb->ccb_done = mfi_scsi_ld_done;
 1548         ccb->ccb_xs = xs;
 1549         ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
 1550         ccb->ccb_sgl = &pf->mpf_sgl;
 1551 
 1552         if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
 1553                 ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
 1554                     MFI_DATA_IN : MFI_DATA_OUT;
 1555         else
 1556                 ccb->ccb_direction = MFI_DATA_NONE;
 1557 
 1558         if (xs->data) {
 1559                 ccb->ccb_data = xs->data;
 1560                 ccb->ccb_len = xs->datalen;
 1561 
 1562                 if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
 1563                     BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
 1564                         return 1;
 1565         }
 1566 
 1567         return 0;
 1568 }
 1569 
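      /*
       * mfi_scsipi_request: scsipi adapter entry point.  READ/WRITE CDBs
       * are decoded onto the fast logical-disk I/O path, SYNCHRONIZE CACHE
       * becomes a firmware cache-flush DCMD (or is completed immediately
       * when a good BBU makes the cache stable), and everything else is
       * passed through to the firmware.  Commands are polled or queued
       * depending on XS_CTL_POLL.
       */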
 1570 static void
 1571 mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
 1572     void *arg)
 1573 {
 1574         struct scsipi_periph    *periph;
 1575         struct scsipi_xfer      *xs;
 1576         struct scsipi_adapter   *adapt = chan->chan_adapter;
 1577         struct mfi_softc        *sc = device_private(adapt->adapt_dev);
 1578         struct mfi_ccb          *ccb;
 1579         struct scsi_rw_6        *rw;
 1580         struct scsipi_rw_10     *rwb;
 1581         struct scsipi_rw_12     *rw12;
 1582         struct scsipi_rw_16     *rw16;
 1583         union mfi_mbox          mbox;
 1584         uint64_t                blockno;
 1585         uint32_t                blockcnt;
 1586         uint8_t                 target;
 1587         int                     s;
 1588 
 1589         switch (req) {
 1590         case ADAPTER_REQ_GROW_RESOURCES:
 1591                 /* Not supported. */
 1592                 return;
 1593         case ADAPTER_REQ_SET_XFER_MODE:
 1594         {
 1595                 struct scsipi_xfer_mode *xm = arg;
 1596                 xm->xm_mode = PERIPH_CAP_TQING;
 1597                 xm->xm_period = 0;
 1598                 xm->xm_offset = 0;
 1599                 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
 1600                 return;
 1601         }
 1602         case ADAPTER_REQ_RUN_XFER:
 1603                 break;
 1604         }
 1605 
 1606         xs = arg;
 1607 
 1608         periph = xs->xs_periph;
 1609         target = periph->periph_target;
 1610 
 1611         DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
 1612             "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
 1613             periph->periph_target, periph->periph_lun);
 1614 
 1615         s = splbio();
 1616         if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
 1617             periph->periph_lun != 0) {
 1618                 DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
 1619                     DEVNAME(sc), target);
 1620                 xs->error = XS_SELTIMEOUT;
 1621                 scsipi_done(xs);
 1622                 splx(s);
 1623                 return;
 1624         }
 1625         if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
 1626             xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
 1627                 /* the cache is stable storage, don't flush */
 1628                 xs->error = XS_NOERROR;
 1629                 xs->status = SCSI_OK;
 1630                 xs->resid = 0;
 1631                 scsipi_done(xs);
 1632                 splx(s);
 1633                 return;
 1634         }
 1635 
 1636         if ((ccb = mfi_get_ccb(sc)) == NULL) {
 1637                 DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
 1638                 xs->error = XS_RESOURCE_SHORTAGE;
 1639                 scsipi_done(xs);
 1640                 splx(s);
 1641                 return;
 1642         }
 1643 
 1644         switch (xs->cmd->opcode) {
 1645         /* IO path */
 1646         case READ_16:
 1647         case WRITE_16:
 1648                 rw16 = (struct scsipi_rw_16 *)xs->cmd;
 1649                 blockno = _8btol(rw16->addr);
 1650                 blockcnt = _4btol(rw16->length);
 1651                 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
 1652                         goto stuffup;
 1653                 }
 1654                 break;
 1655 
 1656         case READ_12:
 1657         case WRITE_12:
 1658                 rw12 = (struct scsipi_rw_12 *)xs->cmd;
 1659                 blockno = _4btol(rw12->addr);
 1660                 blockcnt = _4btol(rw12->length);
 1661                 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
 1662                         goto stuffup;
 1663                 }
 1664                 break;
 1665 
 1666         case READ_10:
 1667         case WRITE_10:
 1668                 rwb = (struct scsipi_rw_10 *)xs->cmd;
 1669                 blockno = _4btol(rwb->addr);
 1670                 blockcnt = _2btol(rwb->length);
 1671                 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
 1672                         goto stuffup;
 1673                 }
 1674                 break;
 1675 
 1676         case SCSI_READ_6_COMMAND:
 1677         case SCSI_WRITE_6_COMMAND:
 1678                 rw = (struct scsi_rw_6 *)xs->cmd;
 1679                 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
 1680                 blockcnt = rw->length ? rw->length : 0x100;
 1681                 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
 1682                         goto stuffup;
 1683                 }
 1684                 break;
 1685 
 1686         case SCSI_SYNCHRONIZE_CACHE_10:
 1687         case SCSI_SYNCHRONIZE_CACHE_16:
 1688                 memset(&mbox, 0, sizeof(mbox));
 1689                 mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 1690                 if (mfi_mgmt(ccb, xs,
 1691                     MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, &mbox)) {
 1692                         goto stuffup;
 1693                 }
 1694                 break;
 1695 
 1696         /* hand it off to the firmware and let it deal with it */
 1697         case SCSI_TEST_UNIT_READY:
 1698                 /* save off sd? after autoconf */
 1699                 if (!cold)      /* XXX bogus */
 1700                         strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev),
 1701                             sizeof(sc->sc_ld[target].ld_dev));
 1702                 /* FALLTHROUGH */
 1703 
 1704         default:
 1705                 if (mfi_scsi_ld(ccb, xs)) {
 1706                         goto stuffup;
 1707                 }
 1708                 break;
 1709         }
 1710 
 1711         DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
 1712 
 1713         if (xs->xs_control & XS_CTL_POLL) {
 1714                 if (mfi_poll(ccb)) {
 1715                         /* XXX check for sense in ccb->ccb_sense? */
 1716                         aprint_error_dev(sc->sc_dev,
 1717                             "mfi_scsipi_request poll failed\n");
 1718                         memset(&xs->sense, 0, sizeof(xs->sense));
 1719                         xs->sense.scsi_sense.response_code =
 1720                             SSD_RCODE_VALID | SSD_RCODE_CURRENT;
 1721                         xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
 1722                         xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
 1723                         xs->error = XS_SENSE;
 1724                         xs->status = SCSI_CHECK;
 1725                 } else {
 1726                         DNPRINTF(MFI_D_DMA,
 1727                             "%s: mfi_scsipi_request poll complete %d\n",
 1728                             DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
 1729                         xs->error = XS_NOERROR;
 1730                         xs->status = SCSI_OK;
 1731                         xs->resid = 0;
 1732                 }
 1733                 mfi_put_ccb(ccb);
 1734                 scsipi_done(xs);
 1735                 splx(s);
 1736                 return;
 1737         }
 1738 
 1739         mfi_post(sc, ccb);
 1740 
 1741         DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
 1742             ccb->ccb_dmamap->dm_nsegs);
 1743 
 1744         splx(s);
 1745         return;
 1746 
 1747 stuffup:
 1748         mfi_put_ccb(ccb);
 1749         xs->error = XS_DRIVER_STUFFUP;
 1750         scsipi_done(xs);
 1751         splx(s);
 1752 }
 1753 
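      /*
       * mfi_create_sgl: load the ccb's data buffer into its DMA map and
       * fill in the frame's scatter/gather list, using the IEEE, 64-bit or
       * 32-bit SGL format depending on the controller type and command,
       * then pre-sync the buffer and account for the extra frames the SGL
       * occupies.
       */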
 1754 static int
 1755 mfi_create_sgl(struct mfi_ccb *ccb, int flags)
 1756 {
 1757         struct mfi_softc        *sc = ccb->ccb_sc;
 1758         struct mfi_frame_header *hdr;
 1759         bus_dma_segment_t       *sgd;
 1760         union mfi_sgl           *sgl;
 1761         int                     error, i;
 1762 
 1763         DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %p\n", DEVNAME(sc),
 1764             ccb->ccb_data);
 1765 
 1766         if (!ccb->ccb_data)
 1767                 return 1;
 1768 
 1769         KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
 1770         error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
 1771             ccb->ccb_data, ccb->ccb_len, NULL, flags);
 1772         if (error) {
 1773                 if (error == EFBIG) {
 1774                         aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
 1775                             sc->sc_max_sgl);
 1776                 } else {
 1777                         aprint_error_dev(sc->sc_dev,
 1778                             "error %d loading dma map\n", error);
 1779                 }
 1780                 return 1;
 1781         }
 1782 
 1783         hdr = &ccb->ccb_frame->mfr_header;
 1784         sgl = ccb->ccb_sgl;
 1785         sgd = ccb->ccb_dmamap->dm_segs;
 1786         for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
 1787                 if (((sc->sc_ioptype == MFI_IOP_SKINNY) ||
 1788                         (sc->sc_ioptype == MFI_IOP_TBOLT)) &&
 1789                     (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO ||
 1790                      hdr->mfh_cmd == MFI_CMD_LD_READ ||
 1791                      hdr->mfh_cmd == MFI_CMD_LD_WRITE)) {
 1792                         sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr);
 1793                         sgl->sg_ieee[i].len = htole32(sgd[i].ds_len);
 1794                         sgl->sg_ieee[i].flags = 0;
 1795                         DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
 1796                             PRIx32 "\n",
 1797                             DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
 1798                         hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
 1799                 } else if (sc->sc_64bit_dma) {
 1800                         sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
 1801                         sgl->sg64[i].len = htole32(sgd[i].ds_len);
 1802                         DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#"
 1803                             PRIx32 "\n",
 1804                             DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
 1805                         hdr->mfh_flags |= MFI_FRAME_SGL64;
 1806                 } else {
 1807                         sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
 1808                         sgl->sg32[i].len = htole32(sgd[i].ds_len);
 1809                         DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
 1810                             DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
 1811                         hdr->mfh_flags |= MFI_FRAME_SGL32;
 1812                 }
 1813         }
 1814 
 1815         if (ccb->ccb_direction == MFI_DATA_IN) {
 1816                 hdr->mfh_flags |= MFI_FRAME_DIR_READ;
 1817                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 1818                     ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1819         } else {
 1820                 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
 1821                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 1822                     ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 1823         }
 1824 
 1825         hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
 1826         ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs;
 1827         ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
 1828 
 1829         DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
 1830             "  dm_nsegs: %d  extra_frames: %d\n",
 1831             DEVNAME(sc),
 1832             hdr->mfh_sg_count,
 1833             ccb->ccb_frame_size,
 1834             sc->sc_frames_size,
 1835             ccb->ccb_dmamap->dm_nsegs,
 1836             ccb->ccb_extra_frames);
 1837 
 1838         return 0;
 1839 }
 1840 
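      /*
       * mfi_mgmt_internal: issue a management DCMD on behalf of the driver
       * itself.  Allocates a ccb, builds the command with mfi_mgmt() and
       * either polls for completion or posts it and sleeps until
       * mfi_mgmt_done() wakes us up.
       */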
 1841 static int
 1842 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
 1843     uint32_t len, void *buf, const union mfi_mbox *mbox, bool poll)
 1844 {
 1845         struct mfi_ccb          *ccb;
 1846         int                     rv = 1;
 1847 
 1848         if ((ccb = mfi_get_ccb(sc)) == NULL)
 1849                 return rv;
 1850         rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
 1851         if (rv)
 1852                 return rv;
 1853 
 1854         if (poll) {
 1855                 rv = 1;
 1856                 if (mfi_poll(ccb))
 1857                         goto done;
 1858         } else {
 1859                 mfi_post(sc, ccb);
 1860 
 1861                 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
 1862                     DEVNAME(sc));
 1863                 while (ccb->ccb_state != MFI_CCB_DONE)
 1864                         tsleep(ccb, PRIBIO, "mfi_mgmt", 0);
 1865 
 1866                 if (ccb->ccb_flags & MFI_CCB_F_ERR)
 1867                         goto done;
 1868         }
 1869         rv = 0;
 1870 
 1871 done:
 1872         mfi_put_ccb(ccb);
 1873         return rv;
 1874 }
 1875 
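      /*
       * mfi_mgmt: fill in a DCMD frame on 'ccb' for the given opcode,
       * optional mailbox and optional data buffer (for which a
       * scatter/gather list is built).  The caller posts or polls the ccb.
       */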
 1876 static int
 1877 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t opc,
 1878     uint32_t dir, uint32_t len, void *buf, const union mfi_mbox *mbox)
 1879 {
 1880         struct mfi_dcmd_frame   *dcmd;
 1881 
 1882         DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);
 1883 
 1884         dcmd = &ccb->ccb_frame->mfr_dcmd;
 1885         memset(dcmd->mdf_mbox.b, 0, MFI_MBOX_SIZE);
 1886         dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
 1887         dcmd->mdf_header.mfh_timeout = 0;
 1888 
 1889         dcmd->mdf_opcode = opc;
 1890         dcmd->mdf_header.mfh_data_len = 0;
 1891         ccb->ccb_direction = dir;
 1892         ccb->ccb_xs = xs;
 1893         ccb->ccb_done = mfi_mgmt_done;
 1894 
 1895         ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
 1896 
 1897         /* handle special opcodes */
 1898         if (mbox)
 1899                 memcpy(dcmd->mdf_mbox.b, mbox, MFI_MBOX_SIZE);
 1900 
 1901         if (dir != MFI_DATA_NONE) {
 1902                 dcmd->mdf_header.mfh_data_len = len;
 1903                 ccb->ccb_data = buf;
 1904                 ccb->ccb_len = len;
 1905                 ccb->ccb_sgl = &dcmd->mdf_sgl;
 1906 
 1907                 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
 1908                         return 1;
 1909         }
 1910         return 0;
 1911 }
 1912 
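      /*
       * mfi_mgmt_done: completion handler for DCMD frames.  Syncs and
       * unloads the data DMA map, records any error in the ccb and either
       * finishes the associated scsipi xfer or wakes up the thread sleeping
       * in mfi_mgmt_internal().
       */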
 1913 static void
 1914 mfi_mgmt_done(struct mfi_ccb *ccb)
 1915 {
 1916         struct scsipi_xfer      *xs = ccb->ccb_xs;
 1917         struct mfi_softc        *sc = ccb->ccb_sc;
 1918         struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
 1919 
 1920         DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
 1921             DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);
 1922 
 1923         if (ccb->ccb_data != NULL) {
 1924                 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
 1925                     DEVNAME(sc));
 1926                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 1927                     ccb->ccb_dmamap->dm_mapsize,
 1928                     (ccb->ccb_direction & MFI_DATA_IN) ?
 1929                     BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
 1930 
 1931                 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
 1932         }
 1933 
 1934         if (hdr->mfh_cmd_status != MFI_STAT_OK)
 1935                 ccb->ccb_flags |= MFI_CCB_F_ERR;
 1936 
 1937         ccb->ccb_state = MFI_CCB_DONE;
 1938         if (xs) {
 1939                 if (hdr->mfh_cmd_status != MFI_STAT_OK) {
 1940                         xs->error = XS_DRIVER_STUFFUP;
 1941                 } else {
 1942                         xs->error = XS_NOERROR;
 1943                         xs->status = SCSI_OK;
 1944                         xs->resid = 0;
 1945                 }
 1946                 mfi_put_ccb(ccb);
 1947                 scsipi_done(xs);
 1948         } else
 1949                 wakeup(ccb);
 1950 }
 1951 
 1952 #if NBIO > 0
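      /*
       * mfi_ioctl: bio(4) ioctl entry point.  Dispatches the BIOC* requests
       * to the helpers below while holding the kernel lock at splbio().
       */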
 1953 int
 1954 mfi_ioctl(device_t dev, u_long cmd, void *addr)
 1955 {
 1956         struct mfi_softc *sc = device_private(dev);
 1957         int error = 0;
 1958         int s;
 1959 
 1960         KERNEL_LOCK(1, curlwp);
 1961         s = splbio();
 1962 
 1963         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
 1964 
 1965         switch (cmd) {
 1966         case BIOCINQ:
 1967                 DNPRINTF(MFI_D_IOCTL, "inq\n");
 1968                 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
 1969                 break;
 1970 
 1971         case BIOCVOL:
 1972                 DNPRINTF(MFI_D_IOCTL, "vol\n");
 1973                 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
 1974                 break;
 1975 
 1976         case BIOCDISK:
 1977                 DNPRINTF(MFI_D_IOCTL, "disk\n");
 1978                 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
 1979                 break;
 1980 
 1981         case BIOCALARM:
 1982                 DNPRINTF(MFI_D_IOCTL, "alarm\n");
 1983                 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
 1984                 break;
 1985 
 1986         case BIOCBLINK:
 1987                 DNPRINTF(MFI_D_IOCTL, "blink\n");
 1988                 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
 1989                 break;
 1990 
 1991         case BIOCSETSTATE:
 1992                 DNPRINTF(MFI_D_IOCTL, "setstate\n");
 1993                 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
 1994                 break;
 1995 
 1996         default:
 1997                 DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
 1998                 error = EINVAL;
 1999         }
 2000         splx(s);
 2001         KERNEL_UNLOCK_ONE(curlwp);
 2002 
 2003         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
 2004         return error;
 2005 }
 2006 
 2007 static int
 2008 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
 2009 {
 2010         struct mfi_conf         *cfg;
 2011         int                     rv = EINVAL;
 2012 
 2013         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
 2014 
 2015         if (mfi_get_info(sc)) {
 2016                 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
 2017                     DEVNAME(sc));
 2018                 return EIO;
 2019         }
 2020 
 2021         /* get the controller configuration for the volume and disk counts */
 2022         cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
 2023         if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
 2024             sizeof *cfg, cfg, NULL, false))
 2025                 goto freeme;
 2026 
 2027         strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
 2028         bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
 2029         bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
 2030 
 2031         rv = 0;
 2032 freeme:
 2033         free(cfg, M_DEVBUF);
 2034         return rv;
 2035 }
 2036 
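      /*
       * mfi_ioctl_vol: fill in a bioc_vol structure for the requested
       * volume: state, rebuild/scrub progress, RAID level, number of disks,
       * size and stripe size.  Volume ids beyond the logical disks are
       * handled as hot spares by mfi_bio_hs().
       */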
 2037 static int
 2038 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
 2039 {
 2040         int                     i, per, rv = EINVAL;
 2041         union mfi_mbox          mbox;
 2042 
 2043         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
 2044             DEVNAME(sc), bv->bv_volid);
 2045 
 2046         if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
 2047             sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false))
 2048                 goto done;
 2049 
 2050         i = bv->bv_volid;
 2051         memset(&mbox, 0, sizeof(mbox));
 2052         mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
 2053         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
 2054             DEVNAME(sc), mbox.b[0]);
 2055 
 2056         if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
 2057             sizeof(sc->sc_ld_details), &sc->sc_ld_details, &mbox, false))
 2058                 goto done;
 2059 
 2060         if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
 2061                 /* go do hotspares */
 2062                 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
 2063                 goto done;
 2064         }
 2065 
 2066         strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));
 2067 
 2068         switch (sc->sc_ld_list.mll_list[i].mll_state) {
 2069         case MFI_LD_OFFLINE:
 2070                 bv->bv_status = BIOC_SVOFFLINE;
 2071                 break;
 2072 
 2073         case MFI_LD_PART_DEGRADED:
 2074         case MFI_LD_DEGRADED:
 2075                 bv->bv_status = BIOC_SVDEGRADED;
 2076                 break;
 2077 
 2078         case MFI_LD_ONLINE:
 2079                 bv->bv_status = BIOC_SVONLINE;
 2080                 break;
 2081 
 2082         default:
 2083                 bv->bv_status = BIOC_SVINVALID;
 2084                 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
 2085                     DEVNAME(sc),
 2086                     sc->sc_ld_list.mll_list[i].mll_state);
 2087         }
 2088 
 2089         /* additional status can modify MFI status */
 2090         switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
 2091         case MFI_LD_PROG_CC:
 2092                 bv->bv_status = BIOC_SVSCRUB;
 2093                 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
 2094                 bv->bv_percent = (per * 100) / 0xffff;
 2095                 bv->bv_seconds =
 2096                     sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
 2097                 break;
 2098 
 2099         case MFI_LD_PROG_BGI:
 2100                 bv->bv_status = BIOC_SVSCRUB;
 2101                 per = (int)sc->sc_ld_details.mld_progress.mlp_bgi.mp_progress;
 2102                 bv->bv_percent = (per * 100) / 0xffff;
 2103                 bv->bv_seconds =
 2104                     sc->sc_ld_details.mld_progress.mlp_bgi.mp_elapsed_seconds;
 2105                 break;
 2106 
 2107         case MFI_LD_PROG_FGI:
 2108         case MFI_LD_PROG_RECONSTRUCT:
 2109                 /* nothing yet */
 2110                 break;
 2111         }
 2112 
 2113         /*
 2114          * The RAID levels are determined per the SNIA DDF spec, this is only
 2115          * a subset that is valid for the MFI controller.
 2116          */
 2117         bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
 2118         if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
 2119             MFI_DDF_SRL_SPANNED)
 2120                 bv->bv_level *= 10;
 2121 
 2122         bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
 2123             sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;
 2124 
 2125         bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */
 2126         bv->bv_stripe_size =
 2127             (512 << sc->sc_ld_details.mld_cfg.mlc_parm.mpa_stripe_size)
 2128             / 1024; /* in KB */
 2129 
 2130         rv = 0;
 2131 done:
 2132         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
 2133             DEVNAME(sc), rv);
 2134         return rv;
 2135 }
 2136 
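      /*
       * mfi_ioctl_disk: fill in a bioc_disk structure for one member disk
       * of a volume.  Walks the controller configuration to find the array
       * slot, reports its state and queries the firmware for size,
       * enclosure index and vendor string.
       */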
 2137 static int
 2138 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
 2139 {
 2140         struct mfi_conf         *cfg;
 2141         struct mfi_array        *ar;
 2142         struct mfi_ld_cfg       *ld;
 2143         struct mfi_pd_details   *pd;
 2144         struct scsipi_inquiry_data *inqbuf;
 2145         char                    vend[8+16+4+1];
 2146         int                     i, rv = EINVAL;
 2147         int                     arr, vol, disk;
 2148         uint32_t                size;
 2149         union mfi_mbox          mbox;
 2150 
 2151         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
 2152             DEVNAME(sc), bd->bd_diskid);
 2153 
 2154         pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
 2155 
 2156         /* send single element command to retrieve size for full structure */
 2157         cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
 2158         if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
 2159             sizeof *cfg, cfg, NULL, false))
 2160                 goto freeme;
 2161 
 2162         size = cfg->mfc_size;
 2163         free(cfg, M_DEVBUF);
 2164 
 2165         /* memory for read config */
 2166         cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
 2167         if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
 2168             size, cfg, NULL, false))
 2169                 goto freeme;
 2170 
 2171         ar = cfg->mfc_array;
 2172 
 2173         /* calculate offset to ld structure */
 2174         ld = (struct mfi_ld_cfg *)(
 2175             ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
 2176             cfg->mfc_array_size * cfg->mfc_no_array);
 2177 
 2178         vol = bd->bd_volid;
 2179 
 2180         if (vol >= cfg->mfc_no_ld) {
 2181                 /* do hotspares */
 2182                 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
 2183                 goto freeme;
 2184         }
 2185 
 2186         /* find corresponding array for ld */
 2187         for (i = 0, arr = 0; i < vol; i++)
 2188                 arr += ld[i].mlc_parm.mpa_span_depth;
 2189 
 2190         /* offset disk into pd list */
 2191         disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
 2192 
 2193         /* offset array index into the next spans */
 2194         arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
 2195 
 2196         bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
 2197         switch (ar[arr].pd[disk].mar_pd_state){
 2198         case MFI_PD_UNCONFIG_GOOD:
 2199                 bd->bd_status = BIOC_SDUNUSED;
 2200                 break;
 2201 
 2202         case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
 2203                 bd->bd_status = BIOC_SDHOTSPARE;
 2204                 break;
 2205 
 2206         case MFI_PD_OFFLINE:
 2207                 bd->bd_status = BIOC_SDOFFLINE;
 2208                 break;
 2209 
 2210         case MFI_PD_FAILED:
 2211                 bd->bd_status = BIOC_SDFAILED;
 2212                 break;
 2213 
 2214         case MFI_PD_REBUILD:
 2215                 bd->bd_status = BIOC_SDREBUILD;
 2216                 break;
 2217 
 2218         case MFI_PD_ONLINE:
 2219                 bd->bd_status = BIOC_SDONLINE;
 2220                 break;
 2221 
 2222         case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
 2223         default:
 2224                 bd->bd_status = BIOC_SDINVALID;
 2225                 break;
 2226         }
 2227 
 2228         /* get the remaining fields */
 2229         memset(&mbox, 0, sizeof(mbox));
 2230         mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
 2231         memset(pd, 0, sizeof(*pd));
 2232         if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
 2233             sizeof *pd, pd, &mbox, false))
 2234                 goto freeme;
 2235 
 2236         bd->bd_size = pd->mpd_size * 512; /* bytes per block */
 2237 
 2238         /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
 2239         bd->bd_channel = pd->mpd_enc_idx;
 2240 
 2241         inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
 2242         memcpy(vend, inqbuf->vendor, sizeof vend - 1);
 2243         vend[sizeof vend - 1] = '\0';
 2244         strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
 2245 
 2246         /* XXX find a way to retrieve serial nr from drive */
 2247         /* XXX find a way to get bd_procdev */
 2248 
 2249         rv = 0;
 2250 freeme:
 2251         free(pd, M_DEVBUF);
 2252         free(cfg, M_DEVBUF);
 2253 
 2254         return rv;
 2255 }
 2256 
 2257 static int
 2258 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
 2259 {
 2260         uint32_t                opc, dir = MFI_DATA_NONE;
 2261         int                     rv = 0;
 2262         int8_t                  ret;
 2263 
 2264         switch (ba->ba_opcode) {
 2265         case BIOC_SADISABLE:
 2266                 opc = MR_DCMD_SPEAKER_DISABLE;
 2267                 break;
 2268 
 2269         case BIOC_SAENABLE:
 2270                 opc = MR_DCMD_SPEAKER_ENABLE;
 2271                 break;
 2272 
 2273         case BIOC_SASILENCE:
 2274                 opc = MR_DCMD_SPEAKER_SILENCE;
 2275                 break;
 2276 
 2277         case BIOC_GASTATUS:
 2278                 opc = MR_DCMD_SPEAKER_GET;
 2279                 dir = MFI_DATA_IN;
 2280                 break;
 2281 
 2282         case BIOC_SATEST:
 2283                 opc = MR_DCMD_SPEAKER_TEST;
 2284                 break;
 2285 
 2286         default:
 2287                 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
 2288                     "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
 2289                 return EINVAL;
 2290         }
 2291 
 2292         if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
 2293                 rv = EINVAL;
 2294         else
 2295                 if (ba->ba_opcode == BIOC_GASTATUS)
 2296                         ba->ba_status = ret;
 2297                 else
 2298                         ba->ba_status = 0;
 2299 
 2300         return rv;
 2301 }
 2302 
 2303 static int
 2304 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
 2305 {
 2306         int                     i, found, rv = EINVAL;
 2307         union mfi_mbox          mbox;
 2308         uint32_t                cmd;
 2309         struct mfi_pd_list      *pd;
 2310 
 2311         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
 2312             bb->bb_status);
 2313 
 2314         /* channel 0 means not in an enclosure, so the disk cannot be blinked */
 2315         if (bb->bb_channel == 0)
 2316                 return EINVAL;
 2317 
 2318         pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
 2319 
 2320         if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
 2321             sizeof(*pd), pd, NULL, false))
 2322                 goto done;
 2323 
 2324         for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
 2325                 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
 2326                     bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
 2327                         found = 1;
 2328                         break;
 2329                 }
 2330 
 2331         if (!found)
 2332                 goto done;
 2333 
 2334         memset(&mbox, 0, sizeof(mbox));
 2335         mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
 2336 
 2337         switch (bb->bb_status) {
 2338         case BIOC_SBUNBLINK:
 2339                 cmd = MR_DCMD_PD_UNBLINK;
 2340                 break;
 2341 
 2342         case BIOC_SBBLINK:
 2343                 cmd = MR_DCMD_PD_BLINK;
 2344                 break;
 2345 
 2346         case BIOC_SBALARM:
 2347         default:
 2348                 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
 2349                     "opcode %x\n", DEVNAME(sc), bb->bb_status);
 2350                 goto done;
 2351         }
 2352 
 2353 
 2354         if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, &mbox, false))
 2355                 goto done;
 2356 
 2357         rv = 0;
 2358 done:
 2359         free(pd, M_DEVBUF);
 2360         return rv;
 2361 }
 2362 
 2363 static int
 2364 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
 2365 {
 2366         struct mfi_pd_list      *pd;
 2367         int                     i, found, rv = EINVAL;
 2368         union mfi_mbox          mbox;
 2369 
 2370         DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
 2371             bs->bs_status);
 2372 
 2373         pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
 2374 
 2375         if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
 2376             sizeof(*pd), pd, NULL, false))
 2377                 goto done;
 2378 
 2379         for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
 2380                 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
 2381                     bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
 2382                         found = 1;
 2383                         break;
 2384                 }
 2385 
 2386         if (!found)
 2387                 goto done;
 2388 
 2389         memset(&mbox, 0, sizeof(mbox));
 2390         mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
 2391 
 2392         switch (bs->bs_status) {
 2393         case BIOC_SSONLINE:
 2394                 mbox.b[4] = MFI_PD_ONLINE;
 2395                 break;
 2396 
 2397         case BIOC_SSOFFLINE:
 2398                 mbox.b[4] = MFI_PD_OFFLINE;
 2399                 break;
 2400 
 2401         case BIOC_SSHOTSPARE:
 2402                 mbox.b[4] = MFI_PD_HOTSPARE;
 2403                 break;
 2404 /*
 2405         case BIOC_SSREBUILD:
 2406                 break;
 2407 */
 2408         default:
 2409                 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
 2410                     "opcode %x\n", DEVNAME(sc), bs->bs_status);
 2411                 goto done;
 2412         }
 2413 
 2414 
 2415         if (mfi_mgmt_internal(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE,
 2416             0, NULL, &mbox, false))
 2417                 goto done;
 2418 
 2419         rv = 0;
 2420 done:
 2421         free(pd, M_DEVBUF);
 2422         return rv;
 2423 }
 2424 
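      /*
       * mfi_bio_hs: report a hot-spare drive.  Reads the controller
       * configuration, locates the hot-spare entry for 'volid' and fills in
       * either a bioc_vol or a bioc_disk structure depending on 'type'.
       */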
 2425 static int
 2426 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
 2427 {
 2428         struct mfi_conf         *cfg;
 2429         struct mfi_hotspare     *hs;
 2430         struct mfi_pd_details   *pd;
 2431         struct bioc_disk        *sdhs;
 2432         struct bioc_vol         *vdhs;
 2433         struct scsipi_inquiry_data *inqbuf;
 2434         char                    vend[8+16+4+1];
 2435         int                     i, rv = EINVAL;
 2436         uint32_t                size;
 2437         union mfi_mbox          mbox;
 2438 
 2439         DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
 2440 
 2441         if (!bio_hs)
 2442                 return EINVAL;
 2443 
 2444         pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);
 2445 
 2446         /* send single element command to retrieve size for full structure */
 2447         cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
 2448         if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
 2449             sizeof *cfg, cfg, NULL, false))
 2450                 goto freeme;
 2451 
 2452         size = cfg->mfc_size;
 2453         free(cfg, M_DEVBUF);
 2454 
 2455         /* memory for read config */
 2456         cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
 2457         if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN,
 2458             size, cfg, NULL, false))
 2459                 goto freeme;
 2460 
 2461         /* calculate offset to hs structure */
 2462         hs = (struct mfi_hotspare *)(
 2463             ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
 2464             cfg->mfc_array_size * cfg->mfc_no_array +
 2465             cfg->mfc_ld_size * cfg->mfc_no_ld);
 2466 
 2467         if (volid < cfg->mfc_no_ld)
 2468                 goto freeme; /* not a hotspare */
 2469 
 2470         if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
 2471                 goto freeme; /* not a hotspare */
 2472 
 2473         /* offset into hotspare structure */
 2474         i = volid - cfg->mfc_no_ld;
 2475 
 2476         DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
 2477             "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
 2478             cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
 2479 
 2480         /* get pd fields */
 2481         memset(&mbox, 0, sizeof(mbox));
 2482         mbox.s[0] = hs[i].mhs_pd.mfp_id;
 2483         if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
 2484             sizeof *pd, pd, &mbox, false)) {
 2485                 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
 2486                     DEVNAME(sc));
 2487                 goto freeme;
 2488         }
 2489 
 2490         switch (type) {
 2491         case MFI_MGMT_VD:
 2492                 vdhs = bio_hs;
 2493                 vdhs->bv_status = BIOC_SVONLINE;
 2494                 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
 2495                 vdhs->bv_level = -1; /* hotspare */
 2496                 vdhs->bv_nodisk = 1;
 2497                 break;
 2498 
 2499         case MFI_MGMT_SD:
 2500                 sdhs = bio_hs;
 2501                 sdhs->bd_status = BIOC_SDHOTSPARE;
 2502                 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
 2503                 sdhs->bd_channel = pd->mpd_enc_idx;
 2504                 sdhs->bd_target = pd->mpd_enc_slot;
 2505                 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
 2506                 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
 2507                 vend[sizeof vend - 1] = '\0';
 2508                 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
 2509                 break;
 2510 
 2511         default:
 2512                 goto freeme;
 2513         }
 2514 
 2515         DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
 2516         rv = 0;
 2517 freeme:
 2518         free(pd, M_DEVBUF);
 2519         free(cfg, M_DEVBUF);
 2520 
 2521         return rv;
 2522 }
 2523 
 2524 static int
 2525 mfi_destroy_sensors(struct mfi_softc *sc)
 2526 {
 2527         if (sc->sc_sme == NULL)
 2528                 return 0;
 2529         sysmon_envsys_unregister(sc->sc_sme);
 2530         sc->sc_sme = NULL;
 2531         free(sc->sc_sensor, M_DEVBUF);
 2532         return 0;
 2533 }
 2534 
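      /*
       * mfi_create_sensors: register a sysmon_envsys sensor for the BBU
       * plus one drive sensor per logical disk.
       */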
 2535 static int
 2536 mfi_create_sensors(struct mfi_softc *sc)
 2537 {
 2538         int i;
 2539         int nsensors = sc->sc_ld_cnt + 1;
 2540         int rv;
 2541 
 2542         sc->sc_sme = sysmon_envsys_create();
 2543         sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
 2544             M_DEVBUF, M_WAITOK | M_ZERO);
 2545 
 2546         /* BBU */
 2547         sc->sc_sensor[0].units = ENVSYS_INDICATOR;
 2548         sc->sc_sensor[0].state = ENVSYS_SINVALID;
 2549         sc->sc_sensor[0].value_cur = 0;
 2550         /* Enable monitoring for BBU state changes, if present */
 2551         if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU)
 2552                 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL;
 2553         snprintf(sc->sc_sensor[0].desc,
 2554             sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc));
 2555         if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0]))
 2556                 goto out;
 2557 
 2558         for (i = 1; i < nsensors; i++) {
 2559                 sc->sc_sensor[i].units = ENVSYS_DRIVE;
 2560                 sc->sc_sensor[i].state = ENVSYS_SINVALID;
 2561                 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
 2562                 /* Enable monitoring for drive state changes */
 2563                 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
 2564                 /* logical drives */
 2565                 snprintf(sc->sc_sensor[i].desc,
 2566                     sizeof(sc->sc_sensor[i].desc), "%s:%d",
 2567                     DEVNAME(sc), i - 1);
 2568                 if (sysmon_envsys_sensor_attach(sc->sc_sme,
 2569                                                 &sc->sc_sensor[i]))
 2570                         goto out;
 2571         }
 2572 
 2573         sc->sc_sme->sme_name = DEVNAME(sc);
 2574         sc->sc_sme->sme_cookie = sc;
 2575         sc->sc_sme->sme_refresh = mfi_sensor_refresh;
 2576         rv = sysmon_envsys_register(sc->sc_sme);
 2577         if (rv != 0) {
 2578                 aprint_error_dev(sc->sc_dev,
 2579                     "unable to register with sysmon (rv = %d)\n", rv);
 2580                 goto out;
 2581         }
 2582         return 0;
 2583 
 2584 out:
 2585         free(sc->sc_sensor, M_DEVBUF);
 2586         sysmon_envsys_destroy(sc->sc_sme);
 2587         sc->sc_sme = NULL;
 2588         return EINVAL;
 2589 }
 2590 
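      /*
       * mfi_sensor_refresh: envsys refresh callback.  Sensor 0 reports the
       * BBU state via mfi_get_bbu(); the remaining sensors map the logical
       * volume status returned by mfi_ioctl_vol() to drive sensor states.
       */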
 2591 static void
 2592 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
 2593 {
 2594         struct mfi_softc        *sc = sme->sme_cookie;
 2595         struct bioc_vol         bv;
 2596         int s;
 2597         int error;
 2598 
 2599         if (edata->sensor >= sc->sc_ld_cnt + 1)
 2600                 return;
 2601 
 2602         if (edata->sensor == 0) {
 2603                 /* BBU */
 2604                 struct mfi_bbu_status   bbu_stat;
 2605                 int bbu_status;
 2606                 if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0)
 2607                         return;
 2608 
 2609                 KERNEL_LOCK(1, curlwp);
 2610                 s = splbio();
 2611                 bbu_status = mfi_get_bbu(sc, &bbu_stat);
 2612                 splx(s);
 2613                 KERNEL_UNLOCK_ONE(curlwp);
 2614                 switch (bbu_status) {
 2615                 case MFI_BBU_GOOD:
 2616                         edata->value_cur = 1;
 2617                         edata->state = ENVSYS_SVALID;
 2618                         if (!sc->sc_bbuok)
 2619                                 aprint_normal_dev(sc->sc_dev,
 2620                                     "BBU state changed to good\n");
 2621                         sc->sc_bbuok = true;
 2622                         break;
 2623                 case MFI_BBU_BAD:
 2624                         edata->value_cur = 0;
 2625                         edata->state = ENVSYS_SCRITICAL;
 2626                         if (sc->sc_bbuok)
 2627                                 aprint_normal_dev(sc->sc_dev,
 2628                                     "BBU state changed to bad\n");
 2629                         sc->sc_bbuok = false;
 2630                         break;
 2631                 case MFI_BBU_UNKNOWN:
 2632                 default:
 2633                         edata->value_cur = 0;
 2634                         edata->state = ENVSYS_SINVALID;
 2635                         sc->sc_bbuok = false;
 2636                         break;
 2637                 }
 2638                 return;
 2639         }
 2640 
 2641         memset(&bv, 0, sizeof(bv));
 2642         bv.bv_volid = edata->sensor - 1;
 2643         KERNEL_LOCK(1, curlwp);
 2644         s = splbio();
 2645         error = mfi_ioctl_vol(sc, &bv);
 2646         splx(s);
 2647         KERNEL_UNLOCK_ONE(curlwp);
 2648         if (error)
 2649                 bv.bv_status = BIOC_SVINVALID;
 2650 
 2651         bio_vol_to_envsys(edata, &bv);
 2652 }
 2653 
 2654 #endif /* NBIO > 0 */
 2655 
 2656 static uint32_t
 2657 mfi_xscale_fw_state(struct mfi_softc *sc)
 2658 {
 2659         return mfi_read(sc, MFI_OMSG0);
 2660 }
 2661 
 2662 static void
 2663 mfi_xscale_intr_dis(struct mfi_softc *sc)
 2664 {
 2665         mfi_write(sc, MFI_OMSK, 0);
 2666 }
 2667 
 2668 static void
 2669 mfi_xscale_intr_ena(struct mfi_softc *sc)
 2670 {
 2671         mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
 2672 }
 2673 
 2674 static int
 2675 mfi_xscale_intr(struct mfi_softc *sc)
 2676 {
 2677         uint32_t status;
 2678 
 2679         status = mfi_read(sc, MFI_OSTS);
 2680         if (!ISSET(status, MFI_OSTS_INTR_VALID))
 2681                 return 0;
 2682 
 2683         /* write status back to acknowledge interrupt */
 2684         mfi_write(sc, MFI_OSTS, status);
 2685         return 1;
 2686 }
 2687 
 2688 static void
 2689 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
 2690 {
 2691         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
 2692             ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
 2693             sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2694         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
 2695             ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
 2696             MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);
 2697 
 2698         mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
 2699             ccb->ccb_extra_frames);
 2700         ccb->ccb_state = MFI_CCB_RUNNING;
 2701 }
 2702 
 2703 static uint32_t
 2704 mfi_ppc_fw_state(struct mfi_softc *sc)
 2705 {
 2706         return mfi_read(sc, MFI_OSP);
 2707 }
 2708 
 2709 static void
 2710 mfi_ppc_intr_dis(struct mfi_softc *sc)
 2711 {
 2712         /* Taking a wild guess --dyoung */
 2713         mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
 2714         mfi_write(sc, MFI_ODC, 0xffffffff);
 2715 }
 2716 
 2717 static void
 2718 mfi_ppc_intr_ena(struct mfi_softc *sc)
 2719 {
 2720         mfi_write(sc, MFI_ODC, 0xffffffff);
 2721         mfi_write(sc, MFI_OMSK, ~0x80000004);
 2722 }
 2723 
 2724 static int
 2725 mfi_ppc_intr(struct mfi_softc *sc)
 2726 {
 2727         uint32_t status;
 2728 
 2729         status = mfi_read(sc, MFI_OSTS);
 2730         if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
 2731                 return 0;
 2732 
 2733         /* write status back to acknowledge interrupt */
 2734         mfi_write(sc, MFI_ODC, status);
 2735         return 1;
 2736 }
 2737 
 2738 static void
 2739 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
 2740 {
 2741         mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
 2742             (ccb->ccb_extra_frames << 1));
 2743         ccb->ccb_state = MFI_CCB_RUNNING;
 2744 }
 2745 
 2746 u_int32_t
 2747 mfi_gen2_fw_state(struct mfi_softc *sc)
 2748 {
 2749         return (mfi_read(sc, MFI_OSP));
 2750 }
 2751 
 2752 void
 2753 mfi_gen2_intr_dis(struct mfi_softc *sc)
 2754 {
 2755         mfi_write(sc, MFI_OMSK, 0xffffffff);
 2756         mfi_write(sc, MFI_ODC, 0xffffffff);
 2757 }
 2758 
 2759 void
 2760 mfi_gen2_intr_ena(struct mfi_softc *sc)
 2761 {
 2762         mfi_write(sc, MFI_ODC, 0xffffffff);
 2763         mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
 2764 }
 2765 
 2766 int
 2767 mfi_gen2_intr(struct mfi_softc *sc)
 2768 {
 2769         u_int32_t status;
 2770 
 2771         status = mfi_read(sc, MFI_OSTS);
 2772         if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
 2773                 return (0);
 2774 
 2775         /* write status back to acknowledge interrupt */
 2776         mfi_write(sc, MFI_ODC, status);
 2777 
 2778         return (1);
 2779 }
 2780 
 2781 void
 2782 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
 2783 {
 2784         mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
 2785             (ccb->ccb_extra_frames << 1));
 2786         ccb->ccb_state = MFI_CCB_RUNNING;
 2787 }
 2788 
 2789 u_int32_t
 2790 mfi_skinny_fw_state(struct mfi_softc *sc)
 2791 {
 2792         return (mfi_read(sc, MFI_OSP));
 2793 }
 2794 
 2795 void
 2796 mfi_skinny_intr_dis(struct mfi_softc *sc)
 2797 {
 2798         mfi_write(sc, MFI_OMSK, 0);
 2799 }
 2800 
 2801 void
 2802 mfi_skinny_intr_ena(struct mfi_softc *sc)
 2803 {
 2804         mfi_write(sc, MFI_OMSK, ~0x00000001);
 2805 }
 2806 
 2807 int
 2808 mfi_skinny_intr(struct mfi_softc *sc)
 2809 {
 2810         u_int32_t status;
 2811 
 2812         status = mfi_read(sc, MFI_OSTS);
 2813         if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
 2814                 return (0);
 2815 
 2816         /* write status back to acknowledge interrupt */
 2817         mfi_write(sc, MFI_OSTS, status);
 2818 
 2819         return (1);
 2820 }
 2821 
 2822 void
 2823 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
 2824 {
 2825         mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
 2826             (ccb->ccb_extra_frames << 1));
 2827         mfi_write(sc, MFI_IQPH, 0x00000000);
 2828         ccb->ccb_state = MFI_CCB_RUNNING;
 2829 }
 2830 
 2831 #define MFI_FUSION_ENABLE_INTERRUPT_MASK        (0x00000008)
 2832 
 2833 void
 2834 mfi_tbolt_intr_ena(struct mfi_softc *sc)
 2835 {
 2836         mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
 2837         mfi_read(sc, MFI_OMSK);
 2838 }
 2839 
 2840 void
 2841 mfi_tbolt_intr_dis(struct mfi_softc *sc)
 2842 {
 2843         mfi_write(sc, MFI_OMSK, 0xFFFFFFFF);
 2844         mfi_read(sc, MFI_OMSK);
 2845 }
 2846 
 2847 int
 2848 mfi_tbolt_intr(struct mfi_softc *sc)
 2849 {
 2850         int32_t status;
 2851 
 2852         status = mfi_read(sc, MFI_OSTS);
 2853 
 2854         if (ISSET(status, 0x1)) {
 2855                 mfi_write(sc, MFI_OSTS, status);
 2856                 mfi_read(sc, MFI_OSTS);
 2857                 if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT))
 2858                         return 0;
 2859                 return 1;
 2860         }
 2861         if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK))
 2862                 return 0;
 2863         mfi_read(sc, MFI_OSTS);
 2864         return 1;
 2865 }
 2866 
 2867 u_int32_t
 2868 mfi_tbolt_fw_state(struct mfi_softc *sc)
 2869 {
 2870         return mfi_read(sc, MFI_OSP);
 2871 }
 2872 
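      /*
       * Post a command to a Thunderbolt controller.  Once sc_MFA_enabled
       * is set, the command is described by a 64-bit MPT request
       * descriptor written across the low and high inbound queue port
       * registers; before that point the legacy frame physical address
       * is posted with the MFA descriptor type flag set.
       */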
 2873 void
 2874 mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
 2875 {
 2876         if (sc->sc_MFA_enabled) {
 2877                 if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0)
 2878                         mfi_tbolt_build_mpt_ccb(ccb);
 2879                 mfi_write(sc, MFI_IQPL,
 2880                     ccb->ccb_tb_request_desc.words & 0xFFFFFFFF);
 2881                 mfi_write(sc, MFI_IQPH,
 2882                     ccb->ccb_tb_request_desc.words >> 32);
 2883                 ccb->ccb_state = MFI_CCB_RUNNING;
 2884                 return;
 2885         }
 2886         uint64_t bus_add = ccb->ccb_pframe;
 2887         bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
 2888             << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 2889         mfi_write(sc, MFI_IQPL, bus_add);
 2890         mfi_write(sc, MFI_IQPH, bus_add >> 32);
 2891         ccb->ccb_state = MFI_CCB_RUNNING;
 2892 }
 2893 
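      /*
       * Wrap a legacy MFI frame in an MPT pass-through request: the
       * single IEEE chain element points at the physical address of the
       * original frame, and the request descriptor is typed as SCSI I/O
       * so that the Thunderbolt firmware will fetch the wrapped frame.
       */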
 2894 static void
 2895 mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb)
 2896 {
 2897         union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc;
 2898         struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
 2899         struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain;
 2900 
 2901         io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
 2902         io_req->SGLOffset0 =
 2903             offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
 2904         io_req->ChainOffset =
 2905             offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
 2906 
 2907         mpi25_ieee_chain =
 2908             (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain;
 2909         mpi25_ieee_chain->Address = ccb->ccb_pframe;
 2910 
 2911         /*
 2912          * In MFI pass-through, NextChainOffset will always be zero to
 2913          * indicate the end of the chain.
 2914          */
 2915         mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
 2916                 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 2917 
 2918         /* setting the length to the maximum length */
 2919         mpi25_ieee_chain->Length = 1024;
 2920 
 2921         req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 2922             MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 2923         ccb->ccb_flags |= MFI_CCB_F_TBOLT;
 2924         bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
 2925             MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
 2926             ccb->ccb_tb_pio_request -
 2927              MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
 2928             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
 2929             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2930 }
 2931 
 2932 /*
 2933  * Description:
 2934  *      This function prepares the message pools for the Thunderbolt controller.
 2935  */
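      /*
       * Layout of sc_tbolt_reqmsgpool, as set up below: first
       * MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc_max_cmds + 1) bytes of
       * request messages, then the reply frame pool of
       * MEGASAS_THUNDERBOLT_REPLY_SIZE * sc_reply_pool_size bytes
       * (filled with 0xff), followed by the SG chain frames of
       * MEGASAS_MAX_SZ_CHAIN_FRAME bytes per command.
       */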
 2936 static int
 2937 mfi_tbolt_init_desc_pool(struct mfi_softc *sc)
 2938 {
 2939         uint32_t     offset = 0;
 2940         uint8_t      *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
 2941 
 2942         /* Request Descriptors alignment restrictions */
 2943         KASSERT(((uintptr_t)addr & 0xFF) == 0);
 2944 
 2945         /* Skip request message pool */
 2946         addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)];
 2947 
 2948         /* The reply frame pool starts here */
 2949         sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
 2950         KASSERT(((uintptr_t)addr & 0xFF) == 0);
 2951 
 2952         offset = (uintptr_t)sc->sc_reply_frame_pool
 2953             - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool);
 2954         sc->sc_reply_frame_busaddr =
 2955             MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset;
 2956 
 2957         /* initializing reply address to 0xFFFFFFFF */
 2958         memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF,
 2959                (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size));
 2960 
 2961         /* Skip Reply Frame Pool */
 2962         addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
 2963         sc->sc_reply_pool_limit = (void *)addr;
 2964 
 2965         offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size;
 2966         sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset;
 2967 
 2968         /* initialize the last_reply_idx to 0 */
 2969         sc->sc_last_reply_idx = 0;
 2970         offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
 2971             sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
 2972         KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size);
 2973         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0,
 2974             MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize,
 2975             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2976         return 0;
 2977 }
 2978 
 2979 /*
 2980  * This routine prepares and issues the INIT2 frame to the firmware.
 2981  */
 2982 
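      /*
       * Build the MPI2 IOC INIT request in the sc_tbolt_ioc_init DMA
       * buffer, point a legacy MFI INIT frame at it and poll for
       * completion.  On success MFA mode is enabled and the LD map sync
       * workqueue is started.
       */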
 2983 static int
 2984 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
 2985 {
 2986         struct mpi2_ioc_init_request   *mpi2IocInit;
 2987         struct mfi_init_frame           *mfi_init;
 2988         struct mfi_ccb                  *ccb;
 2989         bus_addr_t                      phyAddress;
 2990         mfi_address                     *mfiAddressTemp;
 2991         int                             s;
 2992         char                            *verbuf;
 2993         char                            wqbuf[10];
 2994 
 2995         /* Check if initialization is already completed */
 2996         if (sc->sc_MFA_enabled) {
 2997                 return 1;
 2998         }
 2999 
 3000         mpi2IocInit =
 3001             (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init);
 3002 
 3003         s = splbio();
 3004         if ((ccb = mfi_get_ccb(sc)) == NULL) {
 3005                 splx(s);
 3006                 return (EBUSY);
 3007         }
 3008 
 3009 
 3010         mfi_init = &ccb->ccb_frame->mfr_init;
 3011 
 3012         memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request));
 3013         mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
 3014         mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;
 3015 
 3016         /* set MsgVersion and HeaderVersion host driver was built with */
 3017         mpi2IocInit->MsgVersion = MPI2_VERSION;
 3018         mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
 3019         mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4;
 3020         mpi2IocInit->ReplyDescriptorPostQueueDepth =
 3021             (uint16_t)sc->sc_reply_pool_size;
 3022         mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
 3023 
 3024         /* Get physical address of reply frame pool */
 3025         phyAddress = sc->sc_reply_frame_busaddr;
 3026         mfiAddressTemp =
 3027             (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
 3028         mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
 3029         mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
 3030 
 3031         /* Get physical address of request message pool */
 3032         phyAddress =  MFIMEM_DVA(sc->sc_tbolt_reqmsgpool);
 3033         mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress;
 3034         mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
 3035         mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
 3036 
 3037         mpi2IocInit->ReplyFreeQueueAddress =  0; /* Not supported by MR. */
 3038         mpi2IocInit->TimeStamp = time_uptime;
 3039 
 3040         verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf);
 3041         snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
 3042             MEGASAS_VERSION);
 3043         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
 3044             MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE);
 3045         mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf));
 3046         mfi_init->driver_ver_hi =
 3047                     htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32);
 3048 
 3049         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
 3050             MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
 3051             BUS_DMASYNC_PREWRITE);
 3052         /* Get the physical address of the mpi2 ioc init command */
 3053         phyAddress =  MFIMEM_DVA(sc->sc_tbolt_ioc_init);
 3054         mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress);
 3055         mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32);
 3056 
 3057         mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT;
 3058         mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request);
 3059         if (mfi_poll(ccb) != 0) {
 3060                 aprint_error_dev(sc->sc_dev, "failed to send IOC init2 "
 3061                     "command at 0x%" PRIx64 "\n",
 3062                     (uint64_t)ccb->ccb_pframe);
 3063                 splx(s);
 3064                 return 1;
 3065         }
 3066         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0,
 3067             MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 3068         bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0,
 3069             MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize,
 3070             BUS_DMASYNC_POSTWRITE);
 3071         mfi_put_ccb(ccb);
 3072         splx(s);
 3073 
 3074         if (mfi_init->mif_header.mfh_cmd_status == 0) {
 3075                 sc->sc_MFA_enabled = 1;
 3076         }
 3077         else {
 3078                 aprint_error_dev(sc->sc_dev, "Init command Failed %x\n",
 3079                     mfi_init->mif_header.mfh_cmd_status);
 3080                 return 1;
 3081         }
 3082 
 3083         snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc));
 3084         if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info,
 3085             sc, PRIBIO, IPL_BIO, 0) != 0) {
 3086                 aprint_error_dev(sc->sc_dev, "workqueue_create failed\n");
 3087                 return 1;
 3088         }
 3089         workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
 3090         return 0;
 3091 }
 3092 
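      /*
       * Thunderbolt interrupt handler: walk the reply descriptor ring
       * starting at sc_last_reply_idx, complete the ccb named by each
       * descriptor's SMID, mark the descriptor unused again and finally
       * tell the firmware how far we got by writing the new index to the
       * reply post index register.
       */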
 3093 int
 3094 mfi_tbolt_intrh(void *arg)
 3095 {
 3096         struct mfi_softc        *sc = arg;
 3097         struct mfi_ccb          *ccb;
 3098         union mfi_mpi2_reply_descriptor *desc;
 3099         int smid, num_completed;
 3100 
 3101         if (!mfi_tbolt_intr(sc))
 3102                 return 0;
 3103 
 3104         DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc),
 3105             (u_long)sc, (u_long)sc->sc_last_reply_idx);
 3106 
 3107         KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size);
 3108 
 3109         desc = (union mfi_mpi2_reply_descriptor *)
 3110             ((uintptr_t)sc->sc_reply_frame_pool +
 3111              sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
 3112 
 3113         bus_dmamap_sync(sc->sc_dmat,
 3114             MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
 3115             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
 3116             MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
 3117             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3118         num_completed = 0;
 3119         while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) !=
 3120             MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
 3121                 smid = desc->header.SMID;
 3122                 KASSERT(smid > 0 && smid <= sc->sc_max_cmds);
 3123                 ccb = &sc->sc_ccb[smid - 1];
 3124                 DNPRINTF(MFI_D_INTR,
 3125                     "%s: mfi_tbolt_intr SMID %#x reply_idx %#x "
 3126                     "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid,
 3127                     sc->sc_last_reply_idx, desc->words, ccb);
 3128                 KASSERT(ccb->ccb_state == MFI_CCB_RUNNING);
 3129                 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO &&
 3130                     ccb->ccb_tb_io_request->ChainOffset != 0) {
 3131                         bus_dmamap_sync(sc->sc_dmat,
 3132                             MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
 3133                             ccb->ccb_tb_psg_frame -
 3134                                 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
 3135                             MEGASAS_MAX_SZ_CHAIN_FRAME,  BUS_DMASYNC_POSTREAD);
 3136                 }
 3137                 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) {
 3138                         bus_dmamap_sync(sc->sc_dmat,
 3139                             MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
 3140                             ccb->ccb_tb_pio_request -
 3141                                 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
 3142                             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
 3143                             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3144                 }
 3145                 if (ccb->ccb_done)
 3146                         ccb->ccb_done(ccb);
 3147                 else
 3148                         ccb->ccb_state = MFI_CCB_DONE;
 3149                 sc->sc_last_reply_idx++;
 3150                 if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) {
 3151                         sc->sc_last_reply_idx = 0;
 3152                 }
 3153                 desc->words = ~0x0;
 3154                 /* Get the next reply descriptor */
 3155                 desc = (union mfi_mpi2_reply_descriptor *)
 3156                     ((uintptr_t)sc->sc_reply_frame_pool +
 3157                      sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE);
 3158                 num_completed++;
 3159         }
 3160         if (num_completed == 0)
 3161                 return 0;
 3162 
 3163         bus_dmamap_sync(sc->sc_dmat,
 3164             MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
 3165             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1),
 3166             MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size,
 3167             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3168         mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx);
 3169         return 1;
 3170 }
 3171 
 3172 
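      /*
       * Build a Thunderbolt LD I/O request for the transfer described by
       * the scsipi_xfer: copy the CDB into the MPI2 RAID SCSI I/O
       * message, fill in the target, timeout and data direction, attach
       * the scatter/gather list and sense buffer, and flush the request
       * message to memory so the firmware can fetch it.
       */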
 3173 int
 3174 mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
 3175     uint64_t blockno, uint32_t blockcnt)
 3176 {
 3177         struct scsipi_periph *periph = xs->xs_periph;
 3178         struct mfi_mpi2_request_raid_scsi_io    *io_req;
 3179         int sge_count;
 3180 
 3181         DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n",
 3182             device_xname(periph->periph_channel->chan_adapter->adapt_dev),
 3183             periph->periph_target);
 3184 
 3185         if (!xs->data)
 3186                 return 1;
 3187 
 3188         ccb->ccb_done = mfi_tbolt_scsi_ld_done;
 3189         ccb->ccb_xs = xs;
 3190         ccb->ccb_data = xs->data;
 3191         ccb->ccb_len = xs->datalen;
 3192 
 3193         io_req = ccb->ccb_tb_io_request;
 3194 
 3195         /* Just the CDB length; the rest of the flags are zero */
 3196         io_req->IoFlags = xs->cmdlen;
 3197         memset(io_req->CDB.CDB32, 0, 32);
 3198         memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen);
 3199 
 3200         io_req->RaidContext.TargetID = periph->periph_target;
 3201         io_req->RaidContext.Status = 0;
 3202         io_req->RaidContext.exStatus = 0;
 3203         io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT;
 3204         io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST;
 3205         io_req->DevHandle = periph->periph_target;
 3206 
 3207         ccb->ccb_tb_request_desc.header.RequestFlags =
 3208             (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 3209         io_req->DataLength = blockcnt * MFI_SECTOR_LEN;
 3210 
 3211         if (xs->xs_control & XS_CTL_DATA_IN) {
 3212                 io_req->Control = MPI2_SCSIIO_CONTROL_READ;
 3213                 ccb->ccb_direction = MFI_DATA_IN;
 3214         } else {
 3215                 io_req->Control = MPI2_SCSIIO_CONTROL_WRITE;
 3216                 ccb->ccb_direction = MFI_DATA_OUT;
 3217         }
 3218 
 3219         sge_count = mfi_tbolt_create_sgl(ccb,
 3220             (xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK
 3221             );
 3222         if (sge_count < 0)
 3223                 return 1;
 3224         KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl);
 3225         io_req->RaidContext.numSGE = sge_count;
 3226         io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
 3227         io_req->SGLOffset0 =
 3228             offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;
 3229 
 3230         io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense);
 3231         io_req->SenseBufferLength = MFI_SENSE_SIZE;
 3232 
 3233         ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO;
 3234         bus_dmamap_sync(ccb->ccb_sc->sc_dmat,
 3235             MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool),
 3236             ccb->ccb_tb_pio_request -
 3237              MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool),
 3238             MEGASAS_THUNDERBOLT_NEW_MSG_SIZE,
 3239             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3240 
 3241         return 0;
 3242 }
 3243 
 3244 
 3245 static void
 3246 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb)
 3247 {
 3248         struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request;
 3249         mfi_scsi_xs_done(ccb, io_req->RaidContext.Status,
 3250             io_req->RaidContext.exStatus);
 3251 }
 3252 
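      /*
       * Load the data buffer and translate the resulting DMA segments
       * into IEEE SGEs.  Up to MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1
       * elements go into the main message; if more are needed, the last
       * main-message slot becomes a chain element pointing at the ccb's
       * SG chain frame, which holds the remaining segments.  Returns the
       * number of segments, or -1 on failure.
       */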
 3253 static int
 3254 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags)
 3255 {
 3256         struct mfi_softc        *sc = ccb->ccb_sc;
 3257         bus_dma_segment_t       *sgd;
 3258         int                     error, i, sge_idx, sge_count;
 3259         struct mfi_mpi2_request_raid_scsi_io *io_req;
 3260         struct mpi25_ieee_sge_chain64 *sgl_ptr;
 3261 
 3262         DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc),
 3263             (u_long)ccb->ccb_data);
 3264 
 3265         if (!ccb->ccb_data)
 3266                 return -1;
 3267 
 3268         KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p());
 3269         error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap,
 3270             ccb->ccb_data, ccb->ccb_len, NULL, flags);
 3271         if (error) {
 3272                 if (error == EFBIG)
 3273                         aprint_error_dev(sc->sc_dev, "more than %d dma segs\n",
 3274                             sc->sc_max_sgl);
 3275                 else
 3276                         aprint_error_dev(sc->sc_dev,
 3277                             "error %d loading dma map\n", error);
 3278                 return -1;
 3279         }
 3280 
 3281         io_req = ccb->ccb_tb_io_request;
 3282         sgl_ptr = &io_req->SGL.IeeeChain.Chain64;
 3283         sge_count = ccb->ccb_dmamap->dm_nsegs;
 3284         sgd = ccb->ccb_dmamap->dm_segs;
 3285         KASSERT(sge_count <= sc->sc_max_sgl);
 3286         KASSERT(sge_count <=
 3287             (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
 3288              MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));
 3289 
 3290         if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
 3291                 /* One element to store the chain info */
 3292                 sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
 3293                 DNPRINTF(MFI_D_DMA,
 3294                     "mfi sge_idx %d sge_count %d io_req paddr %jx\n",
 3295                     sge_idx, sge_count, (uintmax_t)ccb->ccb_tb_pio_request);
 3296         } else {
 3297                 sge_idx = sge_count;
 3298         }
 3299 
 3300         for (i = 0; i < sge_idx; i++) {
 3301                 sgl_ptr->Address = htole64(sgd[i].ds_addr);
 3302                 sgl_ptr->Length = htole32(sgd[i].ds_len);
 3303                 sgl_ptr->Flags = 0;
 3304                 if (sge_idx < sge_count) {
 3305                         DNPRINTF(MFI_D_DMA,
 3306                             "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
 3307                             " flags 0x%x\n", sgl_ptr, i,
 3308                             sgl_ptr->Address, sgl_ptr->Length,
 3309                             sgl_ptr->Flags);
 3310                 }
 3311                 sgl_ptr++;
 3312         }
 3313         io_req->ChainOffset = 0;
 3314         if (sge_idx < sge_count) {
 3315                 struct mpi25_ieee_sge_chain64 *sg_chain;
 3316                 io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
 3317                 sg_chain = sgl_ptr;
 3318                 /* Prepare chain element */
 3319                 sg_chain->NextChainOffset = 0;
 3320                 sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 3321                     MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
 3322                 sg_chain->Length =  (sizeof(mpi2_sge_io_union) *
 3323                     (sge_count - sge_idx));
 3324                 sg_chain->Address = ccb->ccb_tb_psg_frame;
 3325                 DNPRINTF(MFI_D_DMA,
 3326                     "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
 3327                     " flags 0x%x\n", sg_chain, sg_chain->Address,
 3328                     sg_chain->Length, sg_chain->Flags);
 3329                 sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
 3330                 for (; i < sge_count; i++) {
 3331                         sgl_ptr->Address = htole64(sgd[i].ds_addr);
 3332                         sgl_ptr->Length = htole32(sgd[i].ds_len);
 3333                         sgl_ptr->Flags = 0;
 3334                         DNPRINTF(MFI_D_DMA,
 3335                             "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
 3336                             " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
 3337                             sgl_ptr->Length, sgl_ptr->Flags);
 3338                         sgl_ptr++;
 3339                 }
 3340                 bus_dmamap_sync(sc->sc_dmat,
 3341                     MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
 3342                     ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
 3343                     MEGASAS_MAX_SZ_CHAIN_FRAME,  BUS_DMASYNC_PREREAD);
 3344         }
 3345 
 3346         if (ccb->ccb_direction == MFI_DATA_IN) {
 3347                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 3348                     ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 3349         } else {
 3350                 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
 3351                     ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 3352         }
 3353         return sge_count;
 3354 }
 3355 
 3356 /*
 3357  * The ThunderBolt HW has an option for the driver to directly
 3358  * access the underlying disks and operate on the RAID.  To
 3359  * do this there needs to be a capability to keep the RAID controller
 3360  * and driver in sync.  The FreeBSD driver does not take advantage
 3361  * of this feature since it adds a lot of complexity and slows down
 3362  * performance.  Performance is gained by using the controller's
 3363  * cache etc.
 3364  *
 3365  * Even though this driver doesn't access the disks directly, an
 3366  * AEN-like command is used to inform the RAID firmware to "sync"
 3367  * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 3368  * command in write mode will return when the RAID firmware has
 3369  * detected a change to the RAID state.  An example of this type
 3370  * of change is removing a disk.  Once the command returns,
 3371  * the driver needs to acknowledge this and "sync" all LD's again.
 3372  * This repeats until we shut down.  Then we need to cancel this
 3373  * pending command.
 3374  *
 3375  * If this is not done right, the RAID firmware will not remove a
 3376  * pulled drive and the RAID won't go degraded, etc.  Effectively,
 3377  * this stops any RAID management functions.
 3378  *
 3379  * Doing another LD sync requires the use of an event since the
 3380  * driver needs to do an mfi_wait_command and can't do that in an
 3381  * interrupt thread.
 3382  *
 3383  * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO,
 3384  * but that requires a bunch of structures, and it is simpler to just do
 3385  * MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 3386  */
 3387 
 3388 void
 3389 mfi_tbolt_sync_map_info(struct work *w, void *v)
 3390 {
 3391         struct mfi_softc *sc = v;
 3392         int i;
 3393         struct mfi_ccb *ccb = NULL;
 3394         union mfi_mbox mbox;
 3395         struct mfi_ld *ld_sync;
 3396         size_t ld_size;
 3397         int s;
 3398 
 3399         DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
 3400 again:
 3401         ld_sync = NULL;
 3402         s = splbio();
 3403         if (sc->sc_ldsync_ccb != NULL) {
 3404                 splx(s);
 3405                 return;
 3406         }
 3407 
 3408         if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
 3409             sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
 3410                 aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
 3411                 goto err;
 3412         }
 3413 
 3414         ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;
 3415 
 3416         ld_sync = malloc(ld_size, M_DEVBUF, M_WAITOK | M_ZERO);
 3417         if (ld_sync == NULL) {
 3418                 aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
 3419                 goto err;
 3420         }
 3421         for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
 3422                 ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
 3423         }
 3424 
 3425         if ((ccb = mfi_get_ccb(sc)) == NULL) {
 3426                 aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
 3427                 goto err;
 3428         }
 3429         sc->sc_ldsync_ccb = ccb;
 3430 
 3431         memset(&mbox, 0, sizeof(mbox));
 3432         mbox.b[0] = sc->sc_ld_list.mll_no_ld;
 3433         mbox.b[1] = MFI_DCMD_MBOX_PEND_FLAG;
 3434         if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
 3435             ld_size, ld_sync, &mbox)) {
 3436                 aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
 3437                 goto err;
 3438         }
 3439         /*
 3440          * we won't sleep on this command, so we have to override
 3441          * the callback set up by mfi_mgmt()
 3442          */
 3443         ccb->ccb_done = mfi_sync_map_complete;
 3444 
 3445         mfi_post(sc, ccb);
 3446         splx(s);
 3447         return;
 3448 
 3449 err:
 3450         if (ld_sync)
 3451                 free(ld_sync, M_DEVBUF);
 3452         if (ccb)
 3453                 mfi_put_ccb(ccb);
 3454         sc->sc_ldsync_ccb = NULL;
 3455         splx(s);
 3456         kpause("ldsyncp", 0, hz, NULL);
 3457         goto again;
 3458 }
 3459 
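      /*
       * Completion callback for the LD map sync command: tear down the
       * ccb and, unless the command failed or the controller is being
       * shut down, queue another sync so the next RAID state change is
       * caught as well.
       */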
 3460 static void
 3461 mfi_sync_map_complete(struct mfi_ccb *ccb)
 3462 {
 3463         struct mfi_softc *sc = ccb->ccb_sc;
 3464         bool aborted = !sc->sc_running;
 3465 
 3466         DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
 3467             DEVNAME(ccb->ccb_sc));
 3468         KASSERT(sc->sc_ldsync_ccb == ccb);
 3469         mfi_mgmt_done(ccb);
 3470         free(ccb->ccb_data, M_DEVBUF);
 3471         if (ccb->ccb_flags & MFI_CCB_F_ERR) {
 3472                 aprint_error_dev(sc->sc_dev, "sync command failed\n");
 3473                 aborted = true;
 3474         }
 3475         mfi_put_ccb(ccb);
 3476         sc->sc_ldsync_ccb = NULL;
 3477 
 3478         /* set it up again so the driver can catch more events */
 3479         if (!aborted) {
 3480                 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
 3481         }
 3482 }
 3483 
 3484 static int
 3485 mfifopen(dev_t dev, int flag, int mode, struct lwp *l)
 3486 {
 3487         struct mfi_softc *sc;
 3488 
 3489         if ((sc = device_lookup_private(&mfi_cd, minor(dev))) == NULL)
 3490                 return (ENXIO);
 3491         return (0);
 3492 }
 3493 
 3494 static int
 3495 mfifclose(dev_t dev, int flag, int mode, struct lwp *l)
 3496 {
 3497         return (0);
 3498 }
 3499 
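      /*
       * MFI_CMD ioctl pass-through: copy the caller's frame into a ccb,
       * bounce the user scatter/gather list through a contiguous kernel
       * buffer, run the command, then copy data, sense and the completed
       * frame back out to userland.
       */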
 3500 static int
 3501 mfifioctl(dev_t dev, u_long cmd, void *data, int flag,
 3502     struct lwp *l)
 3503 {
 3504         struct mfi_softc *sc;
 3505         struct mfi_ioc_packet *ioc = data;
 3506         uint8_t *udata;
 3507         struct mfi_ccb *ccb = NULL;
 3508         int ctx, i, s, error;
 3509         union mfi_sense_ptr sense_ptr;
 3510 
 3511         switch (cmd) {
 3512         case MFI_CMD:
 3513                 sc = device_lookup_private(&mfi_cd, ioc->mfi_adapter_no);
 3514                 break;
 3515         default:
 3516                 return ENOTTY;
 3517         }
 3518         if (sc == NULL)
 3519                 return (ENXIO);
 3520         if (sc->sc_opened)
 3521                 return (EBUSY);
 3522 
 3523         switch (cmd) {
 3524         case MFI_CMD:
 3525                 error = kauth_authorize_device_passthru(l->l_cred, dev,
 3526                     KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
 3527                 if (error)
 3528                         return error;
 3529                 if (ioc->mfi_sge_count > MAX_IOCTL_SGE)
 3530                         return EINVAL;
 3531                 s = splbio();
 3532                 if ((ccb = mfi_get_ccb(sc)) == NULL) {
 3533                         splx(s);
                              return ENOMEM;
                      }
 3534                 ccb->ccb_data = NULL;
 3535                 ctx = ccb->ccb_frame->mfr_header.mfh_context;
 3536                 memcpy(ccb->ccb_frame, ioc->mfi_frame.raw,
 3537                    sizeof(*ccb->ccb_frame));
 3538                 ccb->ccb_frame->mfr_header.mfh_context = ctx;
 3539                 ccb->ccb_frame->mfr_header.mfh_scsi_status = 0;
 3540                 ccb->ccb_frame->mfr_header.mfh_pad0 = 0;
 3541                 ccb->ccb_frame_size =
 3542                     (sizeof(union mfi_sgl) * ioc->mfi_sge_count) +
 3543                     ioc->mfi_sgl_off;
 3544                 if (ioc->mfi_sge_count > 0) {
 3545                         ccb->ccb_sgl = (union mfi_sgl *)
 3546                             &ccb->ccb_frame->mfr_bytes[ioc->mfi_sgl_off];
 3547                 }
 3548                 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_READ)
 3549                         ccb->ccb_direction = MFI_DATA_IN;
 3550                 if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_WRITE)
 3551                         ccb->ccb_direction = MFI_DATA_OUT;
 3552                 ccb->ccb_len = ccb->ccb_frame->mfr_header.mfh_data_len;
 3553                 if (ccb->ccb_len > MAXPHYS) {
 3554                         error = ENOMEM;
 3555                         goto out;
 3556                 }
 3557                 if (ccb->ccb_len &&
 3558                     (ccb->ccb_direction & (MFI_DATA_IN | MFI_DATA_OUT)) != 0) {
 3559                         udata = malloc(ccb->ccb_len, M_DEVBUF, M_WAITOK|M_ZERO);
 3560                         if (udata == NULL) {
 3561                                 error = ENOMEM;
 3562                                 goto out;
 3563                         }
 3564                         ccb->ccb_data = udata;
 3565                         if (ccb->ccb_direction & MFI_DATA_OUT) {
 3566                                 for (i = 0; i < ioc->mfi_sge_count; i++) {
 3567                                         error = copyin(ioc->mfi_sgl[i].iov_base,
 3568                                             udata, ioc->mfi_sgl[i].iov_len);
 3569                                         if (error)
 3570                                                 goto out;
 3571                                         udata = &udata[
 3572                                             ioc->mfi_sgl[i].iov_len];
 3573                                 }
 3574                         }
 3575                         if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
 3576                                 error = EIO;
 3577                                 goto out;
 3578                         }
 3579                 }
 3580                 if (ccb->ccb_frame->mfr_header.mfh_cmd == MFI_CMD_PD_SCSI_IO) {
 3581                         ccb->ccb_frame->mfr_io.mif_sense_addr_lo =
 3582                             htole32(ccb->ccb_psense);
 3583                         ccb->ccb_frame->mfr_io.mif_sense_addr_hi = 0;
 3584                 }
 3585                 ccb->ccb_done = mfi_mgmt_done;
 3586                 mfi_post(sc, ccb);
 3587                 while (ccb->ccb_state != MFI_CCB_DONE)
 3588                         tsleep(ccb, PRIBIO, "mfi_fioc", 0);
 3589 
 3590                 if (ccb->ccb_direction & MFI_DATA_IN) {
 3591                         udata = ccb->ccb_data;
 3592                         for (i = 0; i < ioc->mfi_sge_count; i++) {
 3593                                 error = copyout(udata,
 3594                                     ioc->mfi_sgl[i].iov_base,
 3595                                     ioc->mfi_sgl[i].iov_len);
 3596                                 if (error)
 3597                                         goto out;
 3598                                 udata = &udata[
 3599                                     ioc->mfi_sgl[i].iov_len];
 3600                         }
 3601                 }
 3602                 if (ioc->mfi_sense_len) {
 3603                         memcpy(&sense_ptr.sense_ptr_data[0],
 3604                             &ioc->mfi_frame.raw[ioc->mfi_sense_off],
 3605                             sizeof(sense_ptr.sense_ptr_data));
 3606                         error = copyout(ccb->ccb_sense,
 3607                             sense_ptr.user_space,
 3608                             sizeof(sense_ptr.sense_ptr_data));
 3609                         if (error)
 3610                                 goto out;
 3611                 }
 3612                 memcpy(ioc->mfi_frame.raw, ccb->ccb_frame,
 3613                    sizeof(*ccb->ccb_frame));
 3614                 break;
 3615         default:
 3616                 printf("mfifioctl unhandled cmd 0x%lx\n", cmd);
 3617                 return ENOTTY;
 3618         }
 3619 
 3620 out:
 3621         if (ccb->ccb_data)
 3622                 free(ccb->ccb_data, M_DEVBUF);
 3623         if (ccb)
 3624                 mfi_put_ccb(ccb);
 3625         splx(s);
 3626         return error;
 3627 }
