
FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/mly.c


    1 /*      $NetBSD: mly.c,v 1.21 2003/06/29 22:30:25 fvdl Exp $    */
    2 
    3 /*-
    4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. All advertising materials mentioning features or use of this software
   19  *    must display the following acknowledgement:
   20  *        This product includes software developed by the NetBSD
   21  *        Foundation, Inc. and its contributors.
   22  * 4. Neither the name of The NetBSD Foundation nor the names of its
   23  *    contributors may be used to endorse or promote products derived
   24  *    from this software without specific prior written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   36  * POSSIBILITY OF SUCH DAMAGE.
   37  */
   38 
   39 /*-
   40  * Copyright (c) 2000, 2001 Michael Smith
   41  * Copyright (c) 2000 BSDi
   42  * All rights reserved.
   43  *
   44  * Redistribution and use in source and binary forms, with or without
   45  * modification, are permitted provided that the following conditions
   46  * are met:
   47  * 1. Redistributions of source code must retain the above copyright
   48  *    notice, this list of conditions and the following disclaimer.
   49  * 2. Redistributions in binary form must reproduce the above copyright
   50  *    notice, this list of conditions and the following disclaimer in the
   51  *    documentation and/or other materials provided with the distribution.
   52  *
   53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   63  * SUCH DAMAGE.
   64  *
   65  * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
   66  */
   67 
   68 /*
   69  * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
   70  *
   71  * TODO:
   72  *
   73  * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
   74  * o Handle FC and multiple LUNs.
   75  * o Fix mmbox usage.
   76  * o Fix transfer speed fudge.
   77  */
   78 
   79 #include <sys/cdefs.h>
   80 __KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.21 2003/06/29 22:30:25 fvdl Exp $");
   81 
   82 #include <sys/param.h>
   83 #include <sys/systm.h>
   84 #include <sys/device.h>
   85 #include <sys/kernel.h>
   86 #include <sys/queue.h>
   87 #include <sys/buf.h>
   88 #include <sys/endian.h>
   89 #include <sys/conf.h>
   90 #include <sys/malloc.h>
   91 #include <sys/ioctl.h>
   92 #include <sys/scsiio.h>
   93 #include <sys/kthread.h>
   94 
   95 #include <uvm/uvm_extern.h>
   96 
   97 #include <machine/bus.h>
   98 
   99 #include <dev/scsipi/scsi_all.h>
  100 #include <dev/scsipi/scsipi_all.h>
  101 #include <dev/scsipi/scsiconf.h>
  102 
  103 #include <dev/pci/pcireg.h>
  104 #include <dev/pci/pcivar.h>
  105 #include <dev/pci/pcidevs.h>
  106 
  107 #include <dev/pci/mlyreg.h>
  108 #include <dev/pci/mlyio.h>
  109 #include <dev/pci/mlyvar.h>
  110 #include <dev/pci/mly_tables.h>
  111 
  112 static void     mly_attach(struct device *, struct device *, void *);
  113 static int      mly_match(struct device *, struct cfdata *, void *);
  114 static const    struct mly_ident *mly_find_ident(struct pci_attach_args *);
  115 static int      mly_fwhandshake(struct mly_softc *);
  116 static int      mly_flush(struct mly_softc *);
  117 static int      mly_intr(void *);
  118 static void     mly_shutdown(void *);
  119 
  120 static int      mly_alloc_ccbs(struct mly_softc *);
  121 static void     mly_check_event(struct mly_softc *);
  122 static void     mly_complete_event(struct mly_softc *, struct mly_ccb *);
  123 static void     mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
  124 static int      mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *, 
  125                                  caddr_t *, bus_addr_t *, bus_dma_segment_t *);
  126 static void     mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t, 
  127                                 caddr_t, bus_dma_segment_t *);
  128 static int      mly_enable_mmbox(struct mly_softc *);
  129 static void     mly_fetch_event(struct mly_softc *);
  130 static int      mly_get_controllerinfo(struct mly_softc *);
  131 static int      mly_get_eventstatus(struct mly_softc *);
  132 static int      mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
  133                           void **, size_t, void *, size_t *);
  134 static void     mly_padstr(char *, const char *, int);
  135 static void     mly_process_event(struct mly_softc *, struct mly_event *);
  136 static void     mly_release_ccbs(struct mly_softc *);
  137 static int      mly_scan_btl(struct mly_softc *, int, int);
  138 static void     mly_scan_channel(struct mly_softc *, int);
  139 static void     mly_thread(void *);
  140 static void     mly_thread_create(void *);
  141 
  142 static int      mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
  143 static void     mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
  144 static void     mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
  145 static void     mly_ccb_free(struct mly_softc *, struct mly_ccb *);
  146 static int      mly_ccb_map(struct mly_softc *, struct mly_ccb *);
  147 static int      mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
  148 static int      mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
  149 static void     mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
  150 static int      mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);
  151 
  152 static void     mly_get_xfer_mode(struct mly_softc *, int, 
  153                                   struct scsipi_xfer_mode *);
  154 static void     mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
  155 static int      mly_scsipi_ioctl(struct scsipi_channel *, u_long, caddr_t,
  156                                  int, struct proc *);
  157 static void     mly_scsipi_minphys(struct buf *);
  158 static void     mly_scsipi_request(struct scsipi_channel *,
  159                                    scsipi_adapter_req_t, void *);
  160 
  161 static int      mly_user_command(struct mly_softc *, struct mly_user_command *);
  162 static int      mly_user_health(struct mly_softc *, struct mly_user_health *);
  163 
  164 extern struct   cfdriver mly_cd;
  165 
  166 CFATTACH_DECL(mly, sizeof(struct mly_softc),
  167     mly_match, mly_attach, NULL, NULL);
  168 
  169 dev_type_open(mlyopen);
  170 dev_type_close(mlyclose);
  171 dev_type_ioctl(mlyioctl);
  172 
  173 const struct cdevsw mly_cdevsw = {
  174         mlyopen, mlyclose, noread, nowrite, mlyioctl,
  175         nostop, notty, nopoll, nommap, nokqfilter,
  176 };
  177 
  178 struct mly_ident {
  179         u_short vendor;
  180         u_short product;
  181         u_short subvendor;
  182         u_short subproduct;
  183         int     hwif;
  184         const char      *desc;
  185 } static const mly_ident[] = {
  186         {
  187                 PCI_VENDOR_MYLEX,
  188                 PCI_PRODUCT_MYLEX_EXTREMERAID,
  189                 PCI_VENDOR_MYLEX,
  190                 0x0040,
  191                 MLY_HWIF_STRONGARM,
  192                 "eXtremeRAID 2000"
  193         },
  194         {
  195                 PCI_VENDOR_MYLEX,
  196                 PCI_PRODUCT_MYLEX_EXTREMERAID,
  197                 PCI_VENDOR_MYLEX,
  198                 0x0030,
  199                 MLY_HWIF_STRONGARM,
  200                 "eXtremeRAID 3000"
  201         },
  202         {
  203                 PCI_VENDOR_MYLEX,
  204                 PCI_PRODUCT_MYLEX_ACCELERAID,
  205                 PCI_VENDOR_MYLEX,
  206                 0x0050,
  207                 MLY_HWIF_I960RX,
  208                 "AcceleRAID 352"
  209         },
  210         {
  211                 PCI_VENDOR_MYLEX,
  212                 PCI_PRODUCT_MYLEX_ACCELERAID,
  213                 PCI_VENDOR_MYLEX,
  214                 0x0052,
  215                 MLY_HWIF_I960RX,
  216                 "AcceleRAID 170"
  217         },
  218         {
  219                 PCI_VENDOR_MYLEX,
  220                 PCI_PRODUCT_MYLEX_ACCELERAID,
  221                 PCI_VENDOR_MYLEX,
  222                 0x0054,
  223                 MLY_HWIF_I960RX,
  224                 "AcceleRAID 160"
  225         },
  226 };
  227 
  228 static void     *mly_sdh;
  229 
  230 /*
  231  * Try to find a `mly_ident' entry corresponding to this board.
  232  */
  233 static const struct mly_ident *
  234 mly_find_ident(struct pci_attach_args *pa)
  235 {
  236         const struct mly_ident *mpi, *maxmpi;
  237         pcireg_t reg;
  238 
  239         mpi = mly_ident;
  240         maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);
  241 
  242         if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
  243                 return (NULL);
  244 
  245         for (; mpi < maxmpi; mpi++) {
  246                 if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
  247                     PCI_PRODUCT(pa->pa_id) != mpi->product)
  248                         continue;
  249 
  250                 if (mpi->subvendor == 0x0000)
  251                         return (mpi);
  252 
  253                 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
  254 
  255                 if (PCI_VENDOR(reg) == mpi->subvendor &&
  256                     PCI_PRODUCT(reg) == mpi->subproduct)
  257                         return (mpi);
  258         }
  259 
  260         return (NULL);
  261 }
  262 
  263 /*
  264  * Match a supported board.
  265  */
  266 static int
  267 mly_match(struct device *parent, struct cfdata *cfdata, void *aux)
  268 {
  269 
  270         return (mly_find_ident(aux) != NULL);
  271 }
  272 
  273 /*
  274  * Attach a supported board.
  275  */
  276 static void
  277 mly_attach(struct device *parent, struct device *self, void *aux)
  278 {
  279         struct pci_attach_args *pa;
  280         struct mly_softc *mly;
  281         struct mly_ioctl_getcontrollerinfo *mi;
  282         const struct mly_ident *ident;
  283         pci_chipset_tag_t pc;
  284         pci_intr_handle_t ih;
  285         bus_space_handle_t memh, ioh;
  286         bus_space_tag_t memt, iot;
  287         pcireg_t reg;
  288         const char *intrstr;
  289         int ior, memr, i, rv, state;
  290         struct scsipi_adapter *adapt;
  291         struct scsipi_channel *chan;
  292 
  293         mly = (struct mly_softc *)self;
  294         pa = aux;
  295         pc = pa->pa_pc;
  296         ident = mly_find_ident(pa);
  297         state = 0;
  298 
  299         mly->mly_dmat = pa->pa_dmat;
  300         mly->mly_hwif = ident->hwif;
  301 
  302         printf(": Mylex %s\n", ident->desc);
  303 
  304         /*
  305          * Map the PCI register window.
  306          */
  307         memr = -1;
  308         ior = -1;
  309 
  310         for (i = 0x10; i <= 0x14; i += 4) {
  311                 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
  312 
  313                 if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
  314                         if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
  315                                 ior = i;
  316                 } else {
  317                         if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
  318                                 memr = i;
  319                 }
  320         }
  321 
  322         if (memr != -1)
  323                 if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
  324                     &memt, &memh, NULL, NULL))
  325                         memr = -1;
  326         if (ior != -1)
  327                 if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
  328                     &iot, &ioh, NULL, NULL))
  329                         ior = -1;
  330 
  331         if (memr != -1) {
  332                 mly->mly_iot = memt;
  333                 mly->mly_ioh = memh;
  334         } else if (ior != -1) {
  335                 mly->mly_iot = iot;
  336                 mly->mly_ioh = ioh;
  337         } else {
  338                 printf("%s: can't map i/o or memory space\n", self->dv_xname);
  339                 return;
  340         }
  341 
  342         /*
  343          * Enable the device.
  344          */
  345         reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
  346         pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
  347             reg | PCI_COMMAND_MASTER_ENABLE);
  348 
  349         /*
  350          * Map and establish the interrupt.
  351          */
  352         if (pci_intr_map(pa, &ih)) {
  353                 printf("%s: can't map interrupt\n", self->dv_xname);
  354                 return;
  355         }
  356         intrstr = pci_intr_string(pc, ih);
  357         mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
  358         if (mly->mly_ih == NULL) {
  359                 printf("%s: can't establish interrupt", self->dv_xname);
  360                 if (intrstr != NULL)
  361                         printf(" at %s", intrstr);
  362                 printf("\n");
  363                 return;
  364         }
  365 
  366         if (intrstr != NULL)
  367                 printf("%s: interrupting at %s\n", mly->mly_dv.dv_xname,
  368                     intrstr);
  369 
  370         /*
  371          * Take care of interface-specific tasks.
  372          */
  373         switch (mly->mly_hwif) {
  374         case MLY_HWIF_I960RX:
  375                 mly->mly_doorbell_true = 0x00;
  376                 mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
  377                 mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
  378                 mly->mly_idbr = MLY_I960RX_IDBR;
  379                 mly->mly_odbr = MLY_I960RX_ODBR;
  380                 mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
  381                 mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
  382                 mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
  383                 break;
  384 
  385         case MLY_HWIF_STRONGARM:
  386                 mly->mly_doorbell_true = 0xff;
  387                 mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
  388                 mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
  389                 mly->mly_idbr = MLY_STRONGARM_IDBR;
  390                 mly->mly_odbr = MLY_STRONGARM_ODBR;
  391                 mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
  392                 mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
  393                 mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
  394                 break;
  395         }
  396 
  397         /*
  398          * Allocate and map the scatter/gather lists.
  399          */
  400         rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
  401             &mly->mly_sg_dmamap, (caddr_t *)&mly->mly_sg,
  402             &mly->mly_sg_busaddr, &mly->mly_sg_seg);
  403         if (rv) {
  404                 printf("%s: unable to allocate S/G maps\n",
  405                     mly->mly_dv.dv_xname);
  406                 goto bad;
  407         }
  408         state++;
  409 
  410         /*
  411          * Allocate and map the memory mailbox.
  412          */
  413         rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
  414             &mly->mly_mmbox_dmamap, (caddr_t *)&mly->mly_mmbox,
  415             &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
  416         if (rv) {
  417                 printf("%s: unable to allocate mailboxes\n",
  418                     mly->mly_dv.dv_xname);
  419                 goto bad;
  420         }
  421         state++;
  422 
  423         /*
  424          * Initialise per-controller queues.
  425          */
  426         SLIST_INIT(&mly->mly_ccb_free);
  427         SIMPLEQ_INIT(&mly->mly_ccb_queue);
  428 
  429         /*
  430          * Disable interrupts before we start talking to the controller.
  431          */
  432         mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);
  433 
  434         /* 
  435          * Wait for the controller to come ready, handshaking with the
  436          * firmware if required.  This is typically only necessary on
  437          * platforms where the controller BIOS does not run.
  438          */
  439         if (mly_fwhandshake(mly)) {
  440                 printf("%s: unable to bring controller online\n",
  441                     mly->mly_dv.dv_xname);
  442                 goto bad;
  443         }
  444 
  445         /*
  446          * Allocate initial command buffers, obtain controller feature
  447          * information, and then reallocate command buffers, since we'll
  448          * know how many we want.
  449          */
  450         if (mly_alloc_ccbs(mly)) {
  451                 printf("%s: unable to allocate CCBs\n",
  452                     mly->mly_dv.dv_xname);
  453                 goto bad;
  454         }
  455         state++;
  456         if (mly_get_controllerinfo(mly)) {
  457                 printf("%s: unable to retrieve controller info\n",
  458                     mly->mly_dv.dv_xname);
  459                 goto bad;
  460         }
  461         mly_release_ccbs(mly);
  462         if (mly_alloc_ccbs(mly)) {
  463                 printf("%s: unable to allocate CCBs\n",
  464                     mly->mly_dv.dv_xname);
  465                 state--;
  466                 goto bad;
  467         }
  468 
  469         /*
  470          * Get the current event counter for health purposes, populate the
  471          * initial health status buffer.
  472          */
  473         if (mly_get_eventstatus(mly)) {
  474                 printf("%s: unable to retrieve event status\n",
  475                     mly->mly_dv.dv_xname);
  476                 goto bad;
  477         }
  478 
  479         /*
  480          * Enable memory-mailbox mode.
  481          */
  482         if (mly_enable_mmbox(mly)) {
  483                 printf("%s: unable to enable memory mailbox\n",
  484                     mly->mly_dv.dv_xname);
  485                 goto bad;
  486         }
  487 
  488         /* 
  489          * Print a little information about the controller.
  490          */
  491         mi = mly->mly_controllerinfo;
  492 
  493         printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
  494             "(%02d%02d%02d%02d), %dMB RAM\n", mly->mly_dv.dv_xname,
  495             mi->physical_channels_present,
  496             (mi->physical_channels_present) > 1 ? "s" : "",
  497             mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
  498             mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
  499             le16toh(mi->memory_size));
  500 
  501         /*
  502          * Register our `shutdownhook'.
  503          */
  504         if (mly_sdh == NULL)
  505                 mly_sdh = shutdownhook_establish(mly_shutdown, NULL);
  506 
  507         /*
  508          * Clear any previous BTL information.  For each bus that scsipi
  509          * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
  510          * all BTL info at that point.
  511          */
  512         memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));
  513 
  514         mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
  515             mly->mly_controllerinfo->virtual_channels_present;
  516 
  517         /*
  518          * Attach to scsipi.
  519          */
  520         adapt = &mly->mly_adapt;
  521         memset(adapt, 0, sizeof(*adapt));
  522         adapt->adapt_dev = &mly->mly_dv;
  523         adapt->adapt_nchannels = mly->mly_nchans;
  524         adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
  525         adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
  526         adapt->adapt_request = mly_scsipi_request;
  527         adapt->adapt_minphys = mly_scsipi_minphys;
  528         adapt->adapt_ioctl = mly_scsipi_ioctl;
  529 
  530         for (i = 0; i < mly->mly_nchans; i++) {
  531                 chan = &mly->mly_chans[i];
  532                 memset(chan, 0, sizeof(*chan));
  533                 chan->chan_adapter = adapt;
  534                 chan->chan_bustype = &scsi_bustype;
  535                 chan->chan_channel = i;
  536                 chan->chan_ntargets = MLY_MAX_TARGETS;
  537                 chan->chan_nluns = MLY_MAX_LUNS;
  538                 chan->chan_id = mly->mly_controllerparam->initiator_id;
  539                 chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
  540                 config_found(&mly->mly_dv, chan, scsiprint);
  541         }
  542 
  543         /*
  544          * Now enable interrupts...
  545          */
  546         mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);
  547 
  548         /*
  549          * Finally, create our monitoring thread.
  550          */
  551         kthread_create(mly_thread_create, mly);
  552 
  553         mly->mly_state |= MLY_STATE_INITOK;
  554         return;
  555 
  556  bad:
  557         if (state > 2)
  558                 mly_release_ccbs(mly);
  559         if (state > 1)
  560                 mly_dmamem_free(mly, sizeof(struct mly_mmbox),
  561                     mly->mly_mmbox_dmamap, (caddr_t)mly->mly_mmbox,
  562                     &mly->mly_mmbox_seg);
  563         if (state > 0)
  564                 mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
  565                     mly->mly_sg_dmamap, (caddr_t)mly->mly_sg,
  566                     &mly->mly_sg_seg);
  567 }
  568 
  569 /*
  570  * Scan all possible devices on the specified channel.
  571  */
  572 static void
  573 mly_scan_channel(struct mly_softc *mly, int bus)
  574 {
  575         int s, target;
  576 
  577         for (target = 0; target < MLY_MAX_TARGETS; target++) {
  578                 s = splbio();
  579                 if (!mly_scan_btl(mly, bus, target)) {
  580                         tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
  581                             0);
  582                 }
  583                 splx(s);
  584         }
  585 }
  586 
  587 /*
  588  * Shut down all configured `mly' devices.
  589  */
  590 static void
  591 mly_shutdown(void *cookie)
  592 {
  593         struct mly_softc *mly;
  594         int i;
  595 
  596         for (i = 0; i < mly_cd.cd_ndevs; i++) {
  597                 if ((mly = device_lookup(&mly_cd, i)) == NULL)
  598                         continue;
  599 
  600                 if (mly_flush(mly))
  601                         printf("%s: unable to flush cache\n",
  602                             mly->mly_dv.dv_xname);
  603         }
  604 }
  605 
  606 /*
  607  * Fill in the mly_controllerinfo and mly_controllerparam fields in the
  608  * softc.
  609  */
  610 static int
  611 mly_get_controllerinfo(struct mly_softc *mly)
  612 {
  613         struct mly_cmd_ioctl mci;
  614         int rv;
  615 
  616         /*
  617          * Build the getcontrollerinfo ioctl and send it.
  618          */
  619         memset(&mci, 0, sizeof(mci));
  620         mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
  621         rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
  622             sizeof(*mly->mly_controllerinfo), NULL, NULL);
  623         if (rv != 0)
  624                 return (rv);
  625 
  626         /*
  627          * Build the getcontrollerparameter ioctl and send it.
  628          */
  629         memset(&mci, 0, sizeof(mci));
  630         mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
  631         rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
  632             sizeof(*mly->mly_controllerparam), NULL, NULL);
  633 
  634         return (rv);
  635 }
  636 
  637 /*
  638  * Rescan a device, possibly as a consequence of getting an event which
  639  * suggests that it may have changed.  Must be called with interrupts
  640  * blocked.
  641  */
  642 static int
  643 mly_scan_btl(struct mly_softc *mly, int bus, int target)
  644 {
  645         struct mly_ccb *mc;
  646         struct mly_cmd_ioctl *mci;
  647         int rv;
  648 
  649         if (target == mly->mly_controllerparam->initiator_id) {
  650                 mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
  651                 return (EIO);
  652         }
  653 
  654         /* Don't re-scan if a scan is already in progress. */
  655         if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
  656                 return (EBUSY);
  657 
  658         /* Get a command. */
  659         if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
  660                 return (rv);
  661 
  662         /* Set up the data buffer. */
  663         mc->mc_data = malloc(sizeof(union mly_devinfo), 
  664             M_DEVBUF, M_NOWAIT|M_ZERO);
  665 
  666         mc->mc_flags |= MLY_CCB_DATAIN;
  667         mc->mc_complete = mly_complete_rescan;
  668 
  669         /* 
  670          * Build the ioctl.
  671          */
  672         mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
  673         mci->opcode = MDACMD_IOCTL;
  674         mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
  675         memset(&mci->param, 0, sizeof(mci->param));
  676 
  677         if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
  678                 mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
  679                 mci->data_size = htole32(mc->mc_length);
  680                 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
  681                 _lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
  682                     mci->addr);
  683         } else {
  684                 mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
  685                 mci->data_size = htole32(mc->mc_length);
  686                 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
  687                 _lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
  688         }
  689 
  690         /*
  691          * Dispatch the command.
  692          */
  693         if ((rv = mly_ccb_map(mly, mc)) != 0) {
  694                 free(mc->mc_data, M_DEVBUF);
  695                 mly_ccb_free(mly, mc);
  696                 return(rv);
  697         }
  698 
  699         mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
  700         mly_ccb_enqueue(mly, mc);
  701         return (0);
  702 }
  703 
  704 /*
  705  * Handle the completion of a rescan operation.
  706  */
  707 static void
  708 mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
  709 {
  710         struct mly_ioctl_getlogdevinfovalid *ldi;
  711         struct mly_ioctl_getphysdevinfovalid *pdi;
  712         struct mly_cmd_ioctl *mci;
  713         struct mly_btl btl, *btlp;
  714         struct scsipi_xfer_mode xm;
  715         int bus, target, rescan;
  716         u_int tmp;
  717 
  718         mly_ccb_unmap(mly, mc);
  719 
  720         /*
  721          * Recover the bus and target from the command.  We need these even
  722          * in the case where we don't have a useful response.
  723          */
  724         mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
  725         tmp = _3ltol(mci->addr);
  726         rescan = 0;
  727 
  728         if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
  729                 bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
  730                 target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
  731         } else {
  732                 bus = MLY_PHYADDR_CHANNEL(tmp);
  733                 target = MLY_PHYADDR_TARGET(tmp);
  734         }
  735 
  736         btlp = &mly->mly_btl[bus][target];
  737 
  738         /* The default result is 'no device'. */
  739         memset(&btl, 0, sizeof(btl));
  740         btl.mb_flags = MLY_BTL_PROTECTED;
  741 
  742         /* If the rescan completed OK, we have possibly-new BTL data. */
  743         if (mc->mc_status != 0)
  744                 goto out;
  745 
  746         if (mc->mc_length == sizeof(*ldi)) {
  747                 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
  748                 tmp = le32toh(ldi->logical_device_number);
  749 
  750                 if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
  751                     MLY_LOGDEV_TARGET(mly, tmp) != target) {
  752 #ifdef MLYDEBUG
  753                         printf("%s: WARNING: BTL rescan (logical) for %d:%d "
  754                             "returned data for %d:%d instead\n", 
  755                            mly->mly_dv.dv_xname, bus, target,
  756                            MLY_LOGDEV_BUS(mly, tmp),
  757                            MLY_LOGDEV_TARGET(mly, tmp));
  758 #endif
  759                         goto out;
  760                 }
  761 
  762                 btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
  763                 btl.mb_type = ldi->raid_level;
  764                 btl.mb_state = ldi->state;
  765         } else if (mc->mc_length == sizeof(*pdi)) {
  766                 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
  767 
  768                 if (pdi->channel != bus || pdi->target != target) {
  769 #ifdef MLYDEBUG
  770                         printf("%s: WARNING: BTL rescan (physical) for %d:%d "
  771                             "returned data for %d:%d instead\n", 
  772                            mly->mly_dv.dv_xname,
  773                            bus, target, pdi->channel, pdi->target);
  774 #endif
  775                         goto out;
  776                 }
  777 
  778                 btl.mb_flags = MLY_BTL_PHYSICAL;
  779                 btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
  780                 btl.mb_state = pdi->state;
  781                 btl.mb_speed = pdi->speed;
  782                 btl.mb_width = pdi->width;
  783 
  784                 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
  785                         btl.mb_flags |= MLY_BTL_PROTECTED;
  786                 if (pdi->command_tags != 0)
  787                         btl.mb_flags |= MLY_BTL_TQING;
  788         } else {
  789                 printf("%s: BTL rescan result invalid\n", mly->mly_dv.dv_xname);
  790                 goto out;
  791         }
  792 
  793         /* Decide whether we need to rescan the device. */
  794         if (btl.mb_flags != btlp->mb_flags ||
  795             btl.mb_speed != btlp->mb_speed ||
  796             btl.mb_width != btlp->mb_width)
  797                 rescan = 1;
  798 
  799  out:
  800         *btlp = btl;
  801 
  802         if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
  803                 xm.xm_target = target;
  804                 mly_get_xfer_mode(mly, bus, &xm);
  805                 /* XXX SCSI mid-layer rescan goes here. */
  806         }
  807 
  808         /* Wake anybody waiting on the device to be rescanned. */
  809         wakeup(btlp);
  810 
  811         free(mc->mc_data, M_DEVBUF);
  812         mly_ccb_free(mly, mc);
  813 }
  814 
  815 /*
  816  * Get the current health status and set the 'next event' counter to suit.
  817  */
  818 static int
  819 mly_get_eventstatus(struct mly_softc *mly)
  820 {
  821         struct mly_cmd_ioctl mci;
  822         struct mly_health_status *mh;
  823         int rv;
  824 
  825         /* Build the gethealthstatus ioctl and send it. */
  826         memset(&mci, 0, sizeof(mci));
  827         mh = NULL;
  828         mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
  829 
  830         rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
  831         if (rv)
  832                 return (rv);
  833 
  834         /* Get the event counter. */
  835         mly->mly_event_change = le32toh(mh->change_counter);
  836         mly->mly_event_waiting = le32toh(mh->next_event);
  837         mly->mly_event_counter = le32toh(mh->next_event);
  838 
  839         /* Save the health status into the memory mailbox */
  840         memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));
  841 
  842         bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
  843             offsetof(struct mly_mmbox, mmm_health),
  844             sizeof(mly->mly_mmbox->mmm_health),
  845             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
  846 
  847         free(mh, M_DEVBUF);
  848         return (0);
  849 }
  850 
  851 /*
  852  * Enable memory mailbox mode.
  853  */
  854 static int
  855 mly_enable_mmbox(struct mly_softc *mly)
  856 {
  857         struct mly_cmd_ioctl mci;
  858         u_int8_t *sp;
  859         u_int64_t tmp;
  860         int rv;
  861 
  862         /* Build the ioctl and send it. */
  863         memset(&mci, 0, sizeof(mci));
  864         mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
  865 
  866         /* Set buffer addresses. */
  867         tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
  868         mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);
  869 
  870         tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
  871         mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);
  872 
  873         tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
  874         mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);
  875 
  876         /* Set buffer sizes - abuse of data_size field is revolting. */
  877         sp = (u_int8_t *)&mci.data_size;
  878         sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
  879         sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
  880         mci.param.setmemorymailbox.health_buffer_size =
  881             sizeof(union mly_health_region) >> 10;
  882 
  883         rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
  884         if (rv)
  885                 return (rv);
  886 
  887         mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
  888         return (0);
  889 }
  890 
  891 /*
  892  * Flush all pending I/O from the controller.
  893  */
  894 static int
  895 mly_flush(struct mly_softc *mly)
  896 {
  897         struct mly_cmd_ioctl mci;
  898 
  899         /* Build the ioctl */
  900         memset(&mci, 0, sizeof(mci));
  901         mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
  902         mci.param.deviceoperation.operation_device =
  903             MLY_OPDEVICE_PHYSICAL_CONTROLLER;
  904 
  905         /* Pass it off to the controller */
  906         return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
  907 }
  908 
  909 /*
  910  * Perform an ioctl command.
  911  *
  912  * If (data) is not NULL, the command requires data transfer to the
  913  * controller.  If (*data) is NULL the command requires data transfer from
  914  * the controller, and we will allocate a buffer for it.
  915  */
  916 static int
  917 mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
  918           size_t datasize, void *sense_buffer,
  919           size_t *sense_length)
  920 {
  921         struct mly_ccb *mc;
  922         struct mly_cmd_ioctl *mci;
  923         u_int8_t status;
  924         int rv;
  925 
  926         mc = NULL;
  927         if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
  928                 goto bad;
  929 
  930         /*
  931          * Copy the ioctl structure, but save some important fields and then
  932          * fixup.
  933          */
  934         mci = &mc->mc_packet->ioctl;
  935         ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
  936         ioctl->maximum_sense_size = mci->maximum_sense_size;
  937         *mci = *ioctl;
  938         mci->opcode = MDACMD_IOCTL;
  939         mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
  940 
  941         /* Handle the data buffer. */
  942         if (data != NULL) {
  943                 if (*data == NULL) {
  944                         /* Allocate data buffer */
  945                         mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
  946                         mc->mc_flags |= MLY_CCB_DATAIN;
  947                 } else {
  948                         mc->mc_data = *data;
  949                         mc->mc_flags |= MLY_CCB_DATAOUT;
  950                 }
  951                 mc->mc_length = datasize;
  952                 mc->mc_packet->generic.data_size = htole32(datasize);
  953         }
  954 
  955         /* Run the command. */
  956         if (datasize > 0)
  957                 if ((rv = mly_ccb_map(mly, mc)) != 0)
  958                         goto bad;
  959         rv = mly_ccb_poll(mly, mc, 30000);
  960         if (datasize > 0)
  961                 mly_ccb_unmap(mly, mc);
  962         if (rv != 0)
  963                 goto bad;
  964 
  965         /* Clean up and return any data. */
  966         status = mc->mc_status;
  967 
  968         if (status != 0)
  969                 printf("mly_ioctl: command status %d\n", status);
  970 
  971         if (mc->mc_sense > 0 && sense_buffer != NULL) {
  972                 memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
  973                 *sense_length = mc->mc_sense;
  974                 goto bad;
  975         }
  976 
  977         /* Should we return a data pointer? */
  978         if (data != NULL && *data == NULL)
  979                 *data = mc->mc_data;
  980 
  981         /* Command completed OK. */
  982         rv = (status != 0 ? EIO : 0);
  983 
  984  bad:
  985         if (mc != NULL) {
  986                 /* Do we need to free a data buffer we allocated? */
  987                 if (rv != 0 && mc->mc_data != NULL && *data == NULL)
  988                         free(mc->mc_data, M_DEVBUF);
  989                 mly_ccb_free(mly, mc);
  990         }
  991 
  992         return (rv);
  993 }
  994 
  995 /*
  996  * Check for event(s) outstanding in the controller.
  997  */
  998 static void
  999 mly_check_event(struct mly_softc *mly)
 1000 {
 1001 
 1002         bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
 1003             offsetof(struct mly_mmbox, mmm_health),
 1004             sizeof(mly->mly_mmbox->mmm_health),
 1005             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
 1006 
 1007         /*
 1008          * The controller may have updated the health status information, so
 1009          * check for it here.  Note that the counters are all in host
 1010          * memory, so this check is very cheap.  Also note that we depend on
 1011          * checking again as each event fetch completes to drain the queue.
 1012          */
 1013         if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
 1014             mly->mly_event_change) {
 1015                 mly->mly_event_change =
 1016                     le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
 1017                 mly->mly_event_waiting =
 1018                     le32toh(mly->mly_mmbox->mmm_health.status.next_event);
 1019 
 1020                 /* Wake up anyone that might be interested in this. */
 1021                 wakeup(&mly->mly_event_change);
 1022         }
 1023 
 1024         bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
 1025             offsetof(struct mly_mmbox, mmm_health),
 1026             sizeof(mly->mly_mmbox->mmm_health),
 1027             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 1028 
 1029         if (mly->mly_event_counter != mly->mly_event_waiting)
 1030                 mly_fetch_event(mly);
 1031 }
 1032 
 1033 /*
 1034  * Fetch one event from the controller.  If we fail due to resource
 1035  * starvation, we'll be retried the next time a command completes.
 1036  */
 1037 static void
 1038 mly_fetch_event(struct mly_softc *mly)
 1039 {
 1040         struct mly_ccb *mc;
 1041         struct mly_cmd_ioctl *mci;
 1042         int s;
 1043         u_int32_t event;
 1044 
 1045         /* Get a command. */
 1046         if (mly_ccb_alloc(mly, &mc))
 1047                 return;
 1048 
 1049         /* Set up the data buffer. */
 1050         mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
 1051             M_NOWAIT|M_ZERO);
 1052 
 1053         mc->mc_length = sizeof(struct mly_event);
 1054         mc->mc_flags |= MLY_CCB_DATAIN;
 1055         mc->mc_complete = mly_complete_event;
 1056 
 1057         /*
 1058          * Get an event number to fetch.  It's possible that we've raced
 1059          * with another context for the last event, in which case there will
 1060          * be no more events.
 1061          */
 1062         s = splbio();
 1063         if (mly->mly_event_counter == mly->mly_event_waiting) {
 1064                 splx(s);
 1065                 free(mc->mc_data, M_DEVBUF);
 1066                 mly_ccb_free(mly, mc);
 1067                 return;
 1068         }
 1069         event = mly->mly_event_counter++;
 1070         splx(s);
 1071 
 1072         /* 
 1073          * Build the ioctl.
 1074          *
 1075          * At this point we are committed to sending this request, as it
 1076          * will be the only one constructed for this particular event
 1077          * number.
 1078          */
 1079         mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
 1080         mci->opcode = MDACMD_IOCTL;
 1081         mci->data_size = htole32(sizeof(struct mly_event));
 1082         _lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
 1083             mci->addr);
 1084         mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
 1085         mci->sub_ioctl = MDACIOCTL_GETEVENT;
 1086         mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
 1087 
 1088         /*
 1089          * Submit the command.
 1090          */
 1091         if (mly_ccb_map(mly, mc) != 0)
 1092                 goto bad;
 1093         mly_ccb_enqueue(mly, mc);
 1094         return;
 1095 
 1096  bad:
 1097         printf("%s: couldn't fetch event %u\n", mly->mly_dv.dv_xname, event);
 1098         free(mc->mc_data, M_DEVBUF);
 1099         mly_ccb_free(mly, mc);
 1100 }
 1101 
 1102 /*
 1103  * Handle the completion of an event poll.
 1104  */
 1105 static void
 1106 mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
 1107 {
 1108         struct mly_event *me;
 1109 
 1110         me = (struct mly_event *)mc->mc_data;
 1111         mly_ccb_unmap(mly, mc);
 1112         mly_ccb_free(mly, mc);
 1113 
 1114         /* If the event was successfully fetched, process it. */
 1115         if (mc->mc_status == SCSI_OK)
 1116                 mly_process_event(mly, me);
 1117         else
 1118                 printf("%s: unable to fetch event; status = 0x%x\n",
 1119                     mly->mly_dv.dv_xname, mc->mc_status);
 1120 
 1121         free(me, M_DEVBUF);
 1122 
 1123         /* Check for another event. */
 1124         mly_check_event(mly);
 1125 }
 1126 
 1127 /*
 1128  * Process a controller event.  Called with interrupts blocked (i.e., at
 1129  * interrupt time).
 1130  */
 1131 static void
 1132 mly_process_event(struct mly_softc *mly, struct mly_event *me)
 1133 {
 1134         struct scsipi_sense_data *ssd;
 1135         int bus, target, event, class, action;
 1136         const char *fp, *tp;
 1137 
 1138         ssd = (struct scsipi_sense_data *)&me->sense[0];
 1139 
 1140         /* 
 1141          * Errors can be reported using vendor-unique sense data.  In this
 1142          * case, the event code will be 0x1c (Request sense data present),
 1143          * the sense key will be 0x09 (vendor specific), the MSB of the ASC
 1144          * will be set, and the actual event code will be a 16-bit value
 1145          * comprised of the ASCQ (low byte) and low seven bits of the ASC
 1146          * (low seven bits of the high byte).
 1147          */
 1148         if (le32toh(me->code) == 0x1c &&
 1149             (ssd->flags & SSD_KEY) == SKEY_VENDOR_UNIQUE &&
 1150             (ssd->add_sense_code & 0x80) != 0) {
 1151                 event = ((int)(ssd->add_sense_code & ~0x80) << 8) +
 1152                     ssd->add_sense_code_qual;
 1153         } else
 1154                 event = le32toh(me->code);
 1155 
 1156         /* Look up event, get codes. */
 1157         fp = mly_describe_code(mly_table_event, event);
 1158 
 1159         /* Quiet event? */
 1160         class = fp[0];
 1161 #ifdef notyet
 1162         if (isupper(class) && bootverbose)
 1163                 class = tolower(class);
 1164 #endif
 1165 
 1166         /* Get action code, text string. */
 1167         action = fp[1];
 1168         tp = fp + 3;
 1169 
 1170         /*
 1171          * Print some information about the event.
 1172          *
 1173          * This code uses a table derived from the corresponding portion of
 1174          * the Linux driver, and thus the parser is very similar.
 1175          */
 1176         switch (class) {
 1177         case 'p':
 1178                 /*
 1179                  * Error on physical drive.
 1180                  */
 1181                 printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
 1182                     me->channel, me->target, tp);
 1183                 if (action == 'r')
 1184                         mly->mly_btl[me->channel][me->target].mb_flags |=
 1185                             MLY_BTL_RESCAN;
 1186                 break;
 1187 
 1188         case 'l':
 1189         case 'm':
 1190                 /*
 1191                  * Error on logical unit, or message about logical unit.
 1192                  */
 1193                 bus = MLY_LOGDEV_BUS(mly, me->lun);
 1194                 target = MLY_LOGDEV_TARGET(mly, me->lun);
 1195                 printf("%s: logical device %d:%d %s\n", mly->mly_dv.dv_xname,
 1196                     bus, target, tp);
 1197                 if (action == 'r')
 1198                         mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
 1199                 break;
 1200 
 1201         case 's':
 1202                 /*
 1203                  * Report of sense data.
 1204                  */
 1205                 if (((ssd->flags & SSD_KEY) == SKEY_NO_SENSE ||
 1206                     (ssd->flags & SSD_KEY) == SKEY_NOT_READY) && 
 1207                     ssd->add_sense_code == 0x04 && 
 1208                     (ssd->add_sense_code_qual == 0x01 ||
 1209                     ssd->add_sense_code_qual == 0x02)) {
 1210                         /* Ignore NO_SENSE or NOT_READY in one case */
 1211                         break;
 1212                 }
 1213 
 1214                 /*
 1215                  * XXX Should translate this if SCSIVERBOSE.
 1216                  */
 1217                 printf("%s: physical device %d:%d %s\n", mly->mly_dv.dv_xname,
 1218                     me->channel, me->target, tp);
 1219                 printf("%s:  sense key %d  asc %02x  ascq %02x\n",
 1220                     mly->mly_dv.dv_xname, ssd->flags & SSD_KEY,
 1221                     ssd->add_sense_code, ssd->add_sense_code_qual);
 1222                 printf("%s:  info %x%x%x%x  csi %x%x%x%x\n",
 1223                     mly->mly_dv.dv_xname, ssd->info[0], ssd->info[1],
 1224                     ssd->info[2], ssd->info[3], ssd->cmd_spec_info[0],
 1225                     ssd->cmd_spec_info[1], ssd->cmd_spec_info[2],
 1226                     ssd->cmd_spec_info[3]);
 1227                 if (action == 'r')
 1228                         mly->mly_btl[me->channel][me->target].mb_flags |=
 1229                             MLY_BTL_RESCAN;
 1230                 break;
 1231 
 1232         case 'e':
 1233                 printf("%s: ", mly->mly_dv.dv_xname);
 1234                 printf(tp, me->target, me->lun);
 1235                 break;
 1236 
 1237         case 'c':
 1238                 printf("%s: controller %s\n", mly->mly_dv.dv_xname, tp);
 1239                 break;
 1240 
 1241         case '?':
 1242                 printf("%s: %s - %d\n", mly->mly_dv.dv_xname, tp, event);
 1243                 break;
 1244 
 1245         default:
 1246                 /* Probably a 'noisy' event being ignored. */
 1247                 break;
 1248         }
 1249 }
 1250 
 1251 /*
 1252  * Create the monitoring thread.  Called after the standard kernel threads
 1253  * have been created.
 1254  */
 1255 static void
 1256 mly_thread_create(void *cookie)
 1257 {
 1258         struct mly_softc *mly;
 1259         int rv;
 1260 
 1261         mly = cookie;
 1262 
 1263         rv = kthread_create1(mly_thread, mly, &mly->mly_thread, "%s",
 1264             mly->mly_dv.dv_xname);
 1265         if (rv != 0)
 1266                 printf("%s: unable to create thread (%d)\n",
 1267                     mly->mly_dv.dv_xname, rv);
 1268 }
 1269 
 1270 /*
 1271  * Perform periodic activities.
 1272  */
 1273 static void
 1274 mly_thread(void *cookie)
 1275 {
 1276         struct mly_softc *mly;
 1277         struct mly_btl *btl;
 1278         int s, bus, target, done;
 1279 
 1280         mly = (struct mly_softc *)cookie;
 1281 
 1282         for (;;) {
 1283                 /* Check for new events. */
 1284                 mly_check_event(mly);
 1285 
 1286                 /* Re-scan up to 1 device. */
 1287                 s = splbio();
 1288                 done = 0;
 1289                 for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
 1290                         for (target = 0; target < MLY_MAX_TARGETS; target++) {
 1291                                 /* Perform device rescan? */
 1292                                 btl = &mly->mly_btl[bus][target];
 1293                                 if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
 1294                                         btl->mb_flags ^= MLY_BTL_RESCAN;
 1295                                         mly_scan_btl(mly, bus, target);
 1296                                         done = 1;
 1297                                         break;
 1298                                 }
 1299                         }
 1300                 }
 1301                 splx(s);
 1302 
 1303                 /* Sleep for N seconds. */
 1304                 tsleep(mly_thread, PWAIT, "mlyzzz",
 1305                     hz * MLY_PERIODIC_INTERVAL);
 1306         }
 1307 }
 1308 
 1309 /*
 1310  * Submit a command to the controller and poll on completion.  Return
 1311  * non-zero on timeout.
 1312  */
 1313 static int
 1314 mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
 1315 {
 1316         int rv;
 1317 
 1318         if ((rv = mly_ccb_submit(mly, mc)) != 0)
 1319                 return (rv);
 1320 
 1321         for (timo *= 10; timo != 0; timo--) {
 1322                 if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
 1323                         break;
 1324                 mly_intr(mly);
 1325                 DELAY(100);
 1326         }
 1327 
 1328         return (timo == 0);
 1329 }
 1330 
 1331 /*
 1332  * Submit a command to the controller and sleep on completion.  Return
 1333  * non-zero on timeout.
 1334  */
 1335 static int
 1336 mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
 1337 {
 1338         int rv, s;
 1339 
 1340         mly_ccb_enqueue(mly, mc);
 1341 
 1342         s = splbio();
 1343         if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
 1344                 splx(s);
 1345                 return (0);
 1346         }
 1347         rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
 1348         splx(s);
 1349 
 1350         return (rv);
 1351 }
 1352 
 1353 /*
 1354  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 1355  * the order that they were enqueued and try to submit their command blocks
 1356  * to the controller for execution.
 1357  */
 1358 void
 1359 mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
 1360 {
 1361         int s;
 1362 
 1363         s = splbio();
 1364 
 1365         if (mc != NULL)
 1366                 SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);
 1367 
 1368         while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
 1369                 if (mly_ccb_submit(mly, mc))
 1370                         break;
 1371                 SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
 1372         }
 1373 
 1374         splx(s);
 1375 }
 1376 
 1377 /*
 1378  * Deliver a command to the controller.
 1379  */
 1380 static int
 1381 mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
 1382 {
 1383         union mly_cmd_packet *pkt;
 1384         int s, off;
 1385 
 1386         mc->mc_packet->generic.command_id = htole16(mc->mc_slot);
 1387 
 1388         bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
 1389             mc->mc_packetphys - mly->mly_pkt_busaddr,
 1390             sizeof(union mly_cmd_packet),
 1391             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1392 
 1393         s = splbio();
 1394 
 1395         /*
 1396          * Do we have to use the hardware mailbox?
 1397          */
 1398         if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
 1399                 /*
 1400                  * Check to see if the controller is ready for us.
 1401                  */
 1402                 if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
 1403                         splx(s);
 1404                         return (EBUSY);
 1405                 }
 1406 
 1407                 /*
 1408                  * It's ready, send the command.
 1409                  */
 1410                 mly_outl(mly, mly->mly_cmd_mailbox,
 1411                     (u_int64_t)mc->mc_packetphys & 0xffffffff);
 1412                 mly_outl(mly, mly->mly_cmd_mailbox + 4,
 1413                     (u_int64_t)mc->mc_packetphys >> 32);
 1414                 mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
 1415         } else {
 1416                 pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
 1417                 off = (caddr_t)pkt - (caddr_t)mly->mly_mmbox;
 1418 
 1419                 bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
 1420                     off, sizeof(mly->mly_mmbox->mmm_command[0]),
 1421                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
 1422 
 1423                 /* Check to see if the next index is free yet. */
 1424                 if (pkt->mmbox.flag != 0) {
 1425                         splx(s);
 1426                         return (EBUSY);
 1427                 }
 1428 
 1429                 /* Copy in new command */
 1430                 memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
 1431                     sizeof(pkt->mmbox.data));
 1432 
 1433                 /* Copy flag last. */
 1434                 pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
 1435 
 1436                 bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
 1437                     off, sizeof(mly->mly_mmbox->mmm_command[0]),
 1438                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 1439 
 1440                 /* Signal controller and update index. */
 1441                 mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
 1442                 mly->mly_mmbox_cmd_idx =
 1443                     (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
 1444         }
 1445 
 1446         splx(s);
 1447         return (0);
 1448 }
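
      /*
       * A note on the memory-mailbox path above: the command data is
       * copied into the ring slot first and the `flag' byte is written
       * last, presumably so that the controller (which treats a
       * non-zero flag as meaning the slot is occupied) never acts on a
       * partially written command.  The producer index simply wraps
       * modulo MLY_MMBOX_COMMANDS; if the slot at the current index is
       * still flagged, the ring is full and EBUSY is returned so that
       * mly_ccb_enqueue() can retry later.
       */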
 1449 
 1450 /*
 1451  * Pick up completed commands from the controller and handle accordingly.
 1452  */
 1453 int
 1454 mly_intr(void *cookie)
 1455 {
 1456         struct mly_ccb *mc;
 1457         union mly_status_packet *sp;
 1458         u_int16_t slot;
 1459         int forus, off;
 1460         struct mly_softc *mly;
 1461 
 1462         mly = cookie;
 1463         forus = 0;
 1464 
 1465         /*
 1466          * Pick up hardware-mailbox commands.
 1467          */
 1468         if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
 1469                 slot = mly_inw(mly, mly->mly_status_mailbox);
 1470 
 1471                 if (slot < MLY_SLOT_MAX) {
 1472                         mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
 1473                         mc->mc_status =
 1474                             mly_inb(mly, mly->mly_status_mailbox + 2);
 1475                         mc->mc_sense =
 1476                             mly_inb(mly, mly->mly_status_mailbox + 3);
 1477                         mc->mc_resid =
 1478                             mly_inl(mly, mly->mly_status_mailbox + 4);
 1479 
 1480                         mly_ccb_complete(mly, mc);
 1481                 } else {
 1482                         /* Slot 0xffff may mean "extremely bogus command". */
 1483                         printf("%s: got HM completion for illegal slot %u\n",
 1484                             mly->mly_dv.dv_xname, slot);
 1485                 }
 1486 
 1487                 /* Unconditionally acknowledge status. */
 1488                 mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
 1489                 mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
 1490                 forus = 1;
 1491         }
 1492 
 1493         /*
 1494          * Pick up memory-mailbox commands.
 1495          */
 1496         if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
 1497                 for (;;) {
 1498                         sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
 1499                         off = (caddr_t)sp - (caddr_t)mly->mly_mmbox;
 1500 
 1501                         bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
 1502                             off, sizeof(mly->mly_mmbox->mmm_command[0]),
 1503                             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
 1504 
 1505                         /* Check for more status. */
 1506                         if (sp->mmbox.flag == 0)
 1507                                 break;
 1508 
 1509                         /* Get slot number. */
 1510                         slot = le16toh(sp->status.command_id);
 1511                         if (slot < MLY_SLOT_MAX) {
 1512                                 mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
 1513                                 mc->mc_status = sp->status.status;
 1514                                 mc->mc_sense = sp->status.sense_length;
 1515                                 mc->mc_resid = le32toh(sp->status.residue);
 1516                                 mly_ccb_complete(mly, mc);
 1517                         } else {
 1518                                 /*
 1519                                  * Slot 0xffff may mean "extremely bogus
 1520                                  * command".
 1521                                  */
 1522                                 printf("%s: got AM completion for illegal "
 1523                                     "slot %u at %d\n", mly->mly_dv.dv_xname,
 1524                                     slot, mly->mly_mmbox_sts_idx);
 1525                         }
 1526 
 1527                         /* Clear and move to next index. */
 1528                         sp->mmbox.flag = 0;
 1529                         mly->mly_mmbox_sts_idx =
 1530                             (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
 1531                 }
 1532 
 1533                 /* Acknowledge that we have collected status value(s). */
 1534                 mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
 1535                 forus = 1;
 1536         }
 1537 
 1538         /*
 1539          * Run the queue.
 1540          */
 1541         if (forus && ! SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
 1542                 mly_ccb_enqueue(mly, NULL);
 1543 
 1544         return (forus);
 1545 }
 1546 
 1547 /*
 1548  * Process completed commands
 1549  */
 1550 static void
 1551 mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
 1552 {
 1553         void (*complete)(struct mly_softc *, struct mly_ccb *);
 1554 
 1555         bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
 1556             mc->mc_packetphys - mly->mly_pkt_busaddr,
 1557             sizeof(union mly_cmd_packet),
 1558             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1559 
 1560         complete = mc->mc_complete;
 1561         mc->mc_flags |= MLY_CCB_COMPLETE;
 1562 
 1563         /* 
 1564          * Call completion handler or wake up sleeping consumer.
 1565          */
 1566         if (complete != NULL)
 1567                 (*complete)(mly, mc);
 1568         else
 1569                 wakeup(mc);
 1570 }
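
      /*
       * The two completion styles pair up with the submission helpers
       * above: commands started via mly_ccb_wait() have no mc_complete
       * handler and are woken through wakeup(mc), matching the
       * tsleep(mc, ...) in mly_ccb_wait(); commands with a handler
       * (e.g. mly_scsipi_complete() for SCSI transfers) are finished by
       * calling that handler directly.
       */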
 1571 
 1572 /*
 1573  * Allocate a command.
 1574  */
 1575 int
 1576 mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
 1577 {
 1578         struct mly_ccb *mc;
 1579         int s;
 1580 
 1581         s = splbio();
 1582         mc = SLIST_FIRST(&mly->mly_ccb_free);
 1583         if (mc != NULL)
 1584                 SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
 1585         splx(s);
 1586 
 1587         *mcp = mc;
 1588         return (mc == NULL ? EAGAIN : 0);
 1589 }
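
      /*
       * A sketch of the overall CCB life cycle, as used by
       * mly_scsipi_request() and mly_user_command() below (hypothetical
       * consumer, error handling abbreviated):
       *
       *      struct mly_ccb *mc;
       *
       *      if (mly_ccb_alloc(mly, &mc) != 0)
       *              ...no free CCBs, back off and retry...
       *      mc->mc_data = buf;
       *      mc->mc_length = len;
       *      mc->mc_flags |= MLY_CCB_DATAIN;         (or MLY_CCB_DATAOUT)
       *      mc->mc_complete = handler;              (or NULL to sleep)
       *      ...fill in mc->mc_packet...
       *      if (mly_ccb_map(mly, mc) != 0)
       *              ...fail...
       *      mly_ccb_enqueue(mly, mc);               (or mly_ccb_wait/_poll)
       *      ...
       *      mly_ccb_unmap(mly, mc);                 (once complete)
       *      mly_ccb_free(mly, mc);
       */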
 1590 
 1591 /*
 1592  * Release a command back to the freelist.
 1593  */
 1594 void
 1595 mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
 1596 {
 1597         int s;
 1598 
 1599         /*
 1600          * Reset the parts of the command that could cause confusion
 1601          * if the next consumer neglects to initialize them.
 1602          */
 1603         mc->mc_data = NULL;
 1604         mc->mc_flags = 0;
 1605         mc->mc_complete = NULL;
 1606         mc->mc_private = NULL;
 1607         mc->mc_packet->generic.command_control = 0;
 1608 
 1609         /*
 1610          * By default, we set up to overwrite the command packet with sense
 1611          * information.
 1612          */
 1613         mc->mc_packet->generic.sense_buffer_address =
 1614             htole64(mc->mc_packetphys);
 1615         mc->mc_packet->generic.maximum_sense_size =
 1616             sizeof(union mly_cmd_packet);
 1617 
 1618         s = splbio();
 1619         SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
 1620         splx(s);
 1621 }
 1622 
 1623 /*
 1624  * Allocate and initialize command and packet structures.
 1625  *
 1626  * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 1627  * allocation to that number.  If we don't yet know how many commands the
 1628  * controller supports, allocate a very small set (suitable for initialization
 1629  * purposes only).
 1630  */
 1631 static int
 1632 mly_alloc_ccbs(struct mly_softc *mly)
 1633 {
 1634         struct mly_ccb *mc;
 1635         int i, rv;
 1636 
 1637         if (mly->mly_controllerinfo == NULL)
 1638                 mly->mly_ncmds = MLY_CCBS_RESV;
 1639         else {
 1640                 i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
 1641                 mly->mly_ncmds = min(MLY_MAX_CCBS, i);
 1642         }
 1643 
 1644         /*
 1645          * Allocate enough space for all the command packets in one chunk
 1646          * and map them permanently into controller-visible space.
 1647          */
 1648         rv = mly_dmamem_alloc(mly,
 1649             mly->mly_ncmds * sizeof(union mly_cmd_packet),
 1650             &mly->mly_pkt_dmamap, (caddr_t *)&mly->mly_pkt,
 1651             &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
 1652         if (rv)
 1653                 return (rv);
 1654 
 1655         mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
 1656             M_DEVBUF, M_NOWAIT|M_ZERO);
              if (mly->mly_ccbs == NULL) {
                      /* The M_NOWAIT allocation can fail; bail out
                       * instead of dereferencing a NULL pointer below. */
                      mly_dmamem_free(mly,
                          mly->mly_ncmds * sizeof(union mly_cmd_packet),
                          mly->mly_pkt_dmamap, (caddr_t)mly->mly_pkt,
                          &mly->mly_pkt_seg);
                      return (ENOMEM);
              }
 1657 
 1658         for (i = 0; i < mly->mly_ncmds; i++) {
 1659                 mc = mly->mly_ccbs + i;
 1660                 mc->mc_slot = MLY_SLOT_START + i;
 1661                 mc->mc_packet = mly->mly_pkt + i;
 1662                 mc->mc_packetphys = mly->mly_pkt_busaddr +
 1663                     (i * sizeof(union mly_cmd_packet));
 1664 
 1665                 rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
 1666                     MLY_MAX_SEGS, MLY_MAX_XFER, 0,
 1667                     BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
 1668                     &mc->mc_datamap);
 1669                 if (rv) {
 1670                         mly_release_ccbs(mly);
 1671                         return (rv);
 1672                 }
 1673 
 1674                 mly_ccb_free(mly, mc);
 1675         }
 1676 
 1677         return (0);
 1678 }
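
      /*
       * Each CCB is tied to one packet in the contiguous packet area:
       * CCB i uses slot MLY_SLOT_START + i and the packet at
       * mly_pkt_busaddr + i * sizeof(union mly_cmd_packet).  That slot
       * number is what mly_ccb_submit() places in command_id, and what
       * mly_intr() uses to map a completion back to its CCB via
       * mly_ccbs + (slot - MLY_SLOT_START).
       */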
 1679 
 1680 /*
 1681  * Free all the storage held by commands.
 1682  *
 1683  * Must be called with all commands on the free list.
 1684  */
 1685 static void
 1686 mly_release_ccbs(struct mly_softc *mly)
 1687 {
 1688         struct mly_ccb *mc;
 1689 
 1690         /* Throw away command buffer DMA maps. */
 1691         while (mly_ccb_alloc(mly, &mc) == 0)
 1692                 bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);
 1693 
 1694         /* Release CCB storage. */
 1695         free(mly->mly_ccbs, M_DEVBUF);
 1696 
 1697         /* Release the packet storage. */
 1698         mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
 1699             mly->mly_pkt_dmamap, (caddr_t)mly->mly_pkt, &mly->mly_pkt_seg);
 1700 }
 1701 
 1702 /*
 1703  * Map a command into controller-visible space.
 1704  */
 1705 static int
 1706 mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
 1707 {
 1708         struct mly_cmd_generic *gen;
 1709         struct mly_sg_entry *sg;
 1710         bus_dma_segment_t *ds;
 1711         int flg, nseg, rv;
 1712 
 1713 #ifdef DIAGNOSTIC
 1714         /* Don't map more than once. */
 1715         if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
 1716                 panic("mly_ccb_map: already mapped");
 1717         mc->mc_flags |= MLY_CCB_MAPPED;
 1718 
 1719         /* Does the command have a data buffer? */
 1720         if (mc->mc_data == NULL)
 1721                 panic("mly_ccb_map: no data buffer");
 1722 #endif
 1723 
 1724         rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
 1725             mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
 1726             ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
 1727             BUS_DMA_READ : BUS_DMA_WRITE));
 1728         if (rv != 0)
 1729                 return (rv);
 1730 
 1731         gen = &mc->mc_packet->generic;
 1732 
 1733         /*
 1734          * Can we use the transfer structure directly?
 1735          */
 1736         if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
 1737                 mc->mc_sgoff = -1;
 1738                 sg = &gen->transfer.direct.sg[0];
 1739         } else {
 1740                 mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
 1741                     MLY_MAX_SEGS;
 1742                 sg = mly->mly_sg + mc->mc_sgoff;
 1743                 gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
 1744                 gen->transfer.indirect.entries[0] = htole16(nseg);
 1745                 gen->transfer.indirect.table_physaddr[0] =
 1746                     htole64(mly->mly_sg_busaddr +
 1747                     (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
 1748         }
 1749 
 1750         /*
 1751          * Fill the S/G table.
 1752          */
 1753         for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
 1754                 sg->physaddr = htole64(ds->ds_addr);
 1755                 sg->length = htole64(ds->ds_len);
 1756         }
 1757 
 1758         /*
 1759          * Sync up the data map.
 1760          */
 1761         if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
 1762                 flg = BUS_DMASYNC_PREREAD;
 1763         else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
 1764                 gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
 1765                 flg = BUS_DMASYNC_PREWRITE;
 1766         }
 1767 
 1768         bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
 1769 
 1770         /*
 1771          * Sync up the chained S/G table, if we're using one.
 1772          */
 1773         if (mc->mc_sgoff == -1)
 1774                 return (0);
 1775 
 1776         bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
 1777             MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);
 1778 
 1779         return (0);
 1780 }
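
      /*
       * Two scatter/gather layouts are used above: transfers with at
       * most two segments are described directly inside the command
       * packet (transfer.direct.sg), while larger transfers set
       * MLY_CMDCTL_EXTENDED_SG_TABLE and point the packet at a per-slot
       * region of MLY_MAX_SEGS entries in the shared mly_sg area,
       * starting (mc_slot - MLY_SLOT_START) * MLY_MAX_SEGS entries in.
       */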
 1781 
 1782 /*
 1783  * Unmap a command from controller-visible space.
 1784  */
 1785 static void
 1786 mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
 1787 {
 1788         int flg;
 1789 
 1790 #ifdef DIAGNOSTIC
 1791         if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
 1792                 panic("mly_ccb_unmap: not mapped");
 1793         mc->mc_flags &= ~MLY_CCB_MAPPED;
 1794 #endif
 1795 
 1796         if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
 1797                 flg = BUS_DMASYNC_POSTREAD;
 1798         else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
 1799                 flg = BUS_DMASYNC_POSTWRITE;
 1800 
 1801         bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
 1802         bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);
 1803 
 1804         if (mc->mc_sgoff == -1)
 1805                 return;
 1806 
 1807         bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
 1808             MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
 1809 }
 1810 
 1811 /*
 1812  * Adjust the size of each I/O before it passes to the SCSI layer.
 1813  */
 1814 static void
 1815 mly_scsipi_minphys(struct buf *bp)
 1816 {
 1817 
 1818         if (bp->b_bcount > MLY_MAX_XFER)
 1819                 bp->b_bcount = MLY_MAX_XFER;
 1820         minphys(bp);
 1821 }
 1822 
 1823 /*
 1824  * Start a SCSI command.
 1825  */
 1826 static void
 1827 mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
 1828                    void *arg)
 1829 {
 1830         struct mly_ccb *mc;
 1831         struct mly_cmd_scsi_small *ss;
 1832         struct scsipi_xfer *xs;
 1833         struct scsipi_periph *periph;
 1834         struct mly_softc *mly;
 1835         struct mly_btl *btl;
 1836         int s, tmp;
 1837 
 1838         mly = (void *)chan->chan_adapter->adapt_dev;
 1839 
 1840         switch (req) {
 1841         case ADAPTER_REQ_RUN_XFER:
 1842                 xs = arg;
 1843                 periph = xs->xs_periph;
 1844                 btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
 1845                 s = splbio();
 1846                 tmp = btl->mb_flags;
 1847                 splx(s);
 1848 
 1849                 /*
 1850                  * Check for I/O attempt to a protected or non-existent

 1851                  * device.
 1852                  */
 1853                 if ((tmp & MLY_BTL_PROTECTED) != 0) {
 1854                         xs->error = XS_SELTIMEOUT;
 1855                         scsipi_done(xs);
 1856                         break;
 1857                 }
 1858 
 1859 #ifdef DIAGNOSTIC
 1860                 /* XXX Increase if/when we support large SCSI commands. */
 1861                 if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
 1862                         printf("%s: cmd too large\n", mly->mly_dv.dv_xname);
 1863                         xs->error = XS_DRIVER_STUFFUP;
 1864                         scsipi_done(xs);
 1865                         break;
 1866                 }
 1867 #endif
 1868 
 1869                 if (mly_ccb_alloc(mly, &mc)) {
 1870                         xs->error = XS_RESOURCE_SHORTAGE;
 1871                         scsipi_done(xs);
 1872                         break;
 1873                 }
 1874 
 1875                 /* Build the command. */
 1876                 mc->mc_data = xs->data;
 1877                 mc->mc_length = xs->datalen;
 1878                 mc->mc_complete = mly_scsipi_complete;
 1879                 mc->mc_private = xs;
 1880 
 1881                 /* Build the packet for the controller. */
 1882                 ss = &mc->mc_packet->scsi_small;
 1883                 ss->opcode = MDACMD_SCSI;
 1884 #ifdef notdef
 1885                 /*
 1886                  * XXX FreeBSD does this, but it doesn't fix anything,
 1887                  * XXX and appears potentially harmful.
 1888                  */
 1889                 ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
 1890 #endif
 1891 
 1892                 ss->data_size = htole32(xs->datalen);
 1893                 _lto3l(MLY_PHYADDR(0, chan->chan_channel,
 1894                     periph->periph_target, periph->periph_lun), ss->addr);
 1895 
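                      /*
                       * The firmware timeout field encodes a value plus a
                       * unit: transfers shorter than a minute are given in
                       * seconds, shorter than an hour in minutes, and
                       * anything longer in hours.  For example, a 30000 ms
                       * xs->timeout becomes 30 | MLY_TIMEOUT_SECONDS, and a
                       * 300000 ms (5 minute) timeout becomes
                       * 5 | MLY_TIMEOUT_MINUTES.
                       */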
 1896                 if (xs->timeout < 60 * 1000)
 1897                         ss->timeout = xs->timeout / 1000 |
 1898                             MLY_TIMEOUT_SECONDS;
 1899                 else if (xs->timeout < 60 * 60 * 1000)
 1900                         ss->timeout = xs->timeout / (60 * 1000) |
 1901                             MLY_TIMEOUT_MINUTES;
 1902                 else
 1903                         ss->timeout = xs->timeout / (60 * 60 * 1000) |
 1904                             MLY_TIMEOUT_HOURS;
 1905 
 1906                 ss->maximum_sense_size = sizeof(xs->sense);
 1907                 ss->cdb_length = xs->cmdlen;
 1908                 memcpy(ss->cdb, xs->cmd, xs->cmdlen);
 1909 
 1910                 if (mc->mc_length != 0) {
 1911                         if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
 1912                                 mc->mc_flags |= MLY_CCB_DATAOUT;
 1913                         else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
 1914                                 mc->mc_flags |= MLY_CCB_DATAIN;
 1915 
 1916                         if (mly_ccb_map(mly, mc) != 0) {
 1917                                 xs->error = XS_DRIVER_STUFFUP;
 1918                                 mly_ccb_free(mly, mc);
 1919                                 scsipi_done(xs);
 1920                                 break;
 1921                         }
 1922                 }
 1923 
 1924                 /*
 1925                  * Give the command to the controller.
 1926                  */
 1927                 if ((xs->xs_control & XS_CTL_POLL) != 0) {
 1928                         if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
 1929                                 xs->error = XS_REQUEUE;
 1930                                 if (mc->mc_length != 0)
 1931                                         mly_ccb_unmap(mly, mc);
 1932                                 mly_ccb_free(mly, mc);
 1933                                 scsipi_done(xs);
 1934                         }
 1935                 } else
 1936                         mly_ccb_enqueue(mly, mc);
 1937 
 1938                 break;
 1939 
 1940         case ADAPTER_REQ_GROW_RESOURCES:
 1941                 /*
 1942                  * Not supported.
 1943                  */
 1944                 break;
 1945 
 1946         case ADAPTER_REQ_SET_XFER_MODE:
 1947                 /*
 1948                  * We can't change the transfer mode, but at least let
 1949                  * scsipi know what the adapter has negotiated.
 1950                  */
 1951                 mly_get_xfer_mode(mly, chan->chan_channel, arg);
 1952                 break;
 1953         }
 1954 }
 1955 
 1956 /*
 1957  * Handle completion of a SCSI command.
 1958  */
 1959 static void
 1960 mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
 1961 {
 1962         struct scsipi_xfer *xs;
 1963         struct scsipi_channel *chan;
 1964         struct scsipi_inquiry_data *inq;
 1965         struct mly_btl *btl;
 1966         int target, sl, s;
 1967         const char *p;
 1968 
 1969         xs = mc->mc_private;
 1970         xs->status = mc->mc_status;
 1971 
 1972         /*
 1973          * XXX The `resid' value as returned by the controller appears to be
 1974          * bogus, so we always set it to zero.  Is it perhaps the transfer
 1975          * count?
 1976          */
 1977         xs->resid = 0; /* mc->mc_resid; */
 1978 
 1979         if (mc->mc_length != 0)
 1980                 mly_ccb_unmap(mly, mc);
 1981 
 1982         switch (mc->mc_status) {
 1983         case SCSI_OK:
 1984                 /*
 1985                  * In order to report logical device type and status, we
 1986                  * overwrite the result of the INQUIRY command to logical
 1987                  * devices.
 1988                  */
 1989                 if (xs->cmd->opcode == INQUIRY) {
 1990                         chan = xs->xs_periph->periph_channel;
 1991                         target = xs->xs_periph->periph_target;
 1992                         btl = &mly->mly_btl[chan->chan_channel][target];
 1993 
 1994                         s = splbio();
 1995                         if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
 1996                                 inq = (struct scsipi_inquiry_data *)xs->data;
 1997                                 mly_padstr(inq->vendor, "MYLEX", 8);
 1998                                 p = mly_describe_code(mly_table_device_type,
 1999                                     btl->mb_type);
 2000                                 mly_padstr(inq->product, p, 16);
 2001                                 p = mly_describe_code(mly_table_device_state,
 2002                                     btl->mb_state);
 2003                                 mly_padstr(inq->revision, p, 4);
 2004                         }
 2005                         splx(s);
 2006                 }
 2007 
 2008                 xs->error = XS_NOERROR;
 2009                 break;
 2010 
 2011         case SCSI_CHECK:
 2012                 sl = mc->mc_sense;
 2013                 if (sl > sizeof(xs->sense.scsi_sense))
 2014                         sl = sizeof(xs->sense.scsi_sense);
 2015                 memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
 2016                 xs->error = XS_SENSE;
 2017                 break;
 2018 
 2019         case SCSI_BUSY:
 2020         case SCSI_QUEUE_FULL:
 2021                 xs->error = XS_BUSY;
 2022                 break;
 2023 
 2024         default:
 2025                 printf("%s: unknown SCSI status 0x%x\n",
 2026                     mly->mly_dv.dv_xname, xs->status);
 2027                 xs->error = XS_DRIVER_STUFFUP;
 2028                 break;
 2029         }
 2030 
 2031         mly_ccb_free(mly, mc);
 2032         scsipi_done(xs);
 2033 }
 2034 
 2035 /*
 2036  * Notify scsipi about a target's transfer mode.
 2037  */
 2038 static void
 2039 mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
 2040 {
 2041         struct mly_btl *btl;
 2042         int s;
 2043 
 2044         btl = &mly->mly_btl[bus][xm->xm_target];
 2045         xm->xm_mode = 0;
 2046 
 2047         s = splbio();
 2048 
 2049         if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {  
 2050                 if (btl->mb_speed == 0) {
 2051                         xm->xm_period = 0;
 2052                         xm->xm_offset = 0;
 2053                 } else {
 2054                         xm->xm_period = 12;                     /* XXX */
 2055                         xm->xm_offset = 8;                      /* XXX */
 2056                         xm->xm_mode |= PERIPH_CAP_SYNC;         /* XXX */
 2057                 }
 2058 
 2059                 switch (btl->mb_width) {
 2060                 case 32:
 2061                         xm->xm_mode = PERIPH_CAP_WIDE32;
 2062                         break;
 2063                 case 16:
 2064                         xm->xm_mode = PERIPH_CAP_WIDE16;
 2065                         break;
 2066                 default:
 2067                         xm->xm_mode = 0;
 2068                         break;
 2069                 }
 2070         } else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
 2071                 xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
 2072                 xm->xm_period = 12;
 2073                 xm->xm_offset = 8;
 2074         }
 2075 
 2076         if ((btl->mb_flags & MLY_BTL_TQING) != 0)
 2077                 xm->xm_mode |= PERIPH_CAP_TQING;
 2078 
 2079         splx(s);
 2080 
 2081         scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
 2082 }
 2083 
 2084 /*
 2085  * ioctl hook; used here only to initiate low-level rescans.
 2086  */
 2087 static int
 2088 mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, caddr_t data,
 2089                  int flag, struct proc *p)
 2090 {
 2091         struct mly_softc *mly;
 2092         int rv;
 2093   
 2094         mly = (struct mly_softc *)chan->chan_adapter->adapt_dev;
 2095   
 2096         switch (cmd) {
 2097         case SCBUSIOLLSCAN:
 2098                 mly_scan_channel(mly, chan->chan_channel);
 2099                 rv = 0;
 2100                 break;
 2101         default:
 2102                 rv = ENOTTY;
 2103                 break;
 2104         }
 2105 
 2106         return (rv);
 2107 }
 2108 
 2109 /*
 2110  * Handshake with the firmware while the card is being initialized.
 2111  */
 2112 static int
 2113 mly_fwhandshake(struct mly_softc *mly) 
 2114 {
 2115         u_int8_t error, param0, param1;
 2116         int spinup;
 2117 
 2118         spinup = 0;
 2119 
 2120         /* Set HM_STSACK and let the firmware initialize. */
 2121         mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
 2122         DELAY(1000);    /* too short? */
 2123 
 2124         /* If HM_STSACK is still true, the controller is initializing. */
 2125         if (!mly_idbr_true(mly, MLY_HM_STSACK))
 2126                 return (0);
 2127 
 2128         printf("%s: controller initialization started\n",
 2129             mly->mly_dv.dv_xname);
 2130 
 2131         /*
 2132          * Spin waiting for initialization to finish, or for a message to be
 2133          * delivered.
 2134          */
 2135         while (mly_idbr_true(mly, MLY_HM_STSACK)) {
 2136                 /* Check for a message */
 2137                 if (!mly_error_valid(mly))
 2138                         continue;
 2139 
 2140                 error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
 2141                 param0 = mly_inb(mly, mly->mly_cmd_mailbox);
 2142                 param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);
 2143 
 2144                 switch (error) {
 2145                 case MLY_MSG_SPINUP:
 2146                         if (!spinup) {
 2147                                 printf("%s: drive spinup in progress\n",
 2148                                     mly->mly_dv.dv_xname);
 2149                                 spinup = 1;
 2150                         }
 2151                         break;
 2152 
 2153                 case MLY_MSG_RACE_RECOVERY_FAIL:
 2154                         printf("%s: mirror race recovery failed:\n",
 2155                             mly->mly_dv.dv_xname);
 2156                         printf("%s: one or more drives offline\n",
 2157                             mly->mly_dv.dv_xname);
 2158                         break;
 2159 
 2160                 case MLY_MSG_RACE_IN_PROGRESS:
 2161                         printf("%s: mirror race recovery in progress\n",
 2162                             mly->mly_dv.dv_xname);
 2163                         break;
 2164 
 2165                 case MLY_MSG_RACE_ON_CRITICAL:
 2166                         printf("%s: mirror race recovery on critical drive\n",
 2167                             mly->mly_dv.dv_xname);
 2168                         break;
 2169 
 2170                 case MLY_MSG_PARITY_ERROR:
 2171                         printf("%s: FATAL MEMORY PARITY ERROR\n",
 2172                             mly->mly_dv.dv_xname);
 2173                         return (ENXIO);
 2174 
 2175                 default:
 2176                         printf("%s: unknown initialization code 0x%x\n",
 2177                             mly->mly_dv.dv_xname, error);
 2178                         break;
 2179                 }
 2180         }
 2181 
 2182         return (0);
 2183 }
 2184 
 2185 /*
 2186  * Space-fill a character string
 2187  */
 2188 static void
 2189 mly_padstr(char *dst, const char *src, int len)
 2190 {
 2191 
 2192         while (len-- > 0) {
 2193                 if (*src != '\0')
 2194                         *dst++ = *src++;
 2195                 else
 2196                         *dst++ = ' ';
 2197         }
 2198 }
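
      /*
       * The destination is space padded and not NUL terminated, as SCSI
       * INQUIRY strings expect.  For example, the call in
       * mly_scsipi_complete() above,
       *
       *      mly_padstr(inq->vendor, "MYLEX", 8);
       *
       * leaves the eight bytes 'M' 'Y' 'L' 'E' 'X' ' ' ' ' ' ' in
       * inq->vendor.
       */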
 2199 
 2200 /*
 2201  * Allocate DMA safe memory.
 2202  */
 2203 static int
 2204 mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap, 
 2205                  caddr_t *kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
 2206 {
 2207         int rseg, rv, state;
 2208 
 2209         state = 0;
 2210         
 2211         if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0, 
 2212             seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
 2213                 printf("%s: dmamem_alloc = %d\n", mly->mly_dv.dv_xname, rv);
 2214                 goto bad;
 2215         }
 2216 
 2217         state++;
 2218 
 2219         if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
 2220             BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
 2221                 printf("%s: dmamem_map = %d\n", mly->mly_dv.dv_xname, rv);
 2222                 goto bad;
 2223         }
 2224 
 2225         state++;
 2226 
 2227         if ((rv = bus_dmamap_create(mly->mly_dmat, size, size, 1, 0, 
 2228             BUS_DMA_NOWAIT, dmamap)) != 0) {
 2229                 printf("%s: dmamap_create = %d\n", mly->mly_dv.dv_xname, rv);
 2230                 goto bad;
 2231         }
 2232 
 2233         state++;
 2234 
 2235         if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size, 
 2236             NULL, BUS_DMA_NOWAIT)) != 0) {
 2237                 printf("%s: dmamap_load = %d\n", mly->mly_dv.dv_xname, rv);
 2238                 goto bad;
 2239         }
 2240 
 2241         *paddr = (*dmamap)->dm_segs[0].ds_addr;
 2242         memset(*kva, 0, size);
 2243         return (0);
 2244 
 2245  bad:
 2246         if (state > 2)
 2247                 bus_dmamap_destroy(mly->mly_dmat, *dmamap);
 2248         if (state > 1)
 2249                 bus_dmamem_unmap(mly->mly_dmat, *kva, size);
 2250         if (state > 0)
 2251                 bus_dmamem_free(mly->mly_dmat, seg, 1);
 2252 
 2253         return (rv);
 2254 }
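
      /*
       * A minimal usage sketch, modelled on the packet allocation in
       * mly_alloc_ccbs() above; every successful mly_dmamem_alloc() is
       * paired with an mly_dmamem_free() of the same size:
       *
       *      if (mly_dmamem_alloc(mly, size, &dmamap, &kva, &paddr,
       *          &seg) != 0)
       *              ...fail...
       *      ...use kva for CPU access, paddr for the controller...
       *      mly_dmamem_free(mly, size, dmamap, kva, &seg);
       */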
 2255 
 2256 /*
 2257  * Free DMA safe memory.
 2258  */
 2259 static void
 2260 mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap, 
 2261                 caddr_t kva, bus_dma_segment_t *seg)
 2262 {
 2263 
 2264         bus_dmamap_unload(mly->mly_dmat, dmamap);
 2265         bus_dmamap_destroy(mly->mly_dmat, dmamap);
 2266         bus_dmamem_unmap(mly->mly_dmat, kva, size);
 2267         bus_dmamem_free(mly->mly_dmat, seg, 1);
 2268 }
 2269 
 2270 
 2271 /*
 2272  * Accept an open operation on the control device.
 2273  */
 2274 int
 2275 mlyopen(dev_t dev, int flag, int mode, struct proc *p)
 2276 {
 2277         struct mly_softc *mly;
 2278 
 2279         if ((mly = device_lookup(&mly_cd, minor(dev))) == NULL)
 2280                 return (ENXIO);
 2281         if ((mly->mly_state & MLY_STATE_INITOK) == 0)
 2282                 return (ENXIO);
 2283         if ((mly->mly_state & MLY_STATE_OPEN) != 0)
 2284                 return (EBUSY);
 2285 
 2286         mly->mly_state |= MLY_STATE_OPEN;
 2287         return (0);
 2288 }
 2289 
 2290 /*
 2291  * Accept the last close on the control device.
 2292  */
 2293 int
 2294 mlyclose(dev_t dev, int flag, int mode, struct proc *p)
 2295 {
 2296         struct mly_softc *mly;
 2297 
 2298         mly = device_lookup(&mly_cd, minor(dev));
 2299         mly->mly_state &= ~MLY_STATE_OPEN;
 2300         return (0);
 2301 }
 2302 
 2303 /*
 2304  * Handle control operations.
 2305  */
 2306 int
 2307 mlyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
 2308 {
 2309         struct mly_softc *mly;
 2310         int rv;
 2311 
 2312         if (securelevel >= 2)
 2313                 return (EPERM);
 2314 
 2315         mly = device_lookup(&mly_cd, minor(dev));
 2316 
 2317         switch (cmd) {
 2318         case MLYIO_COMMAND:
 2319                 rv = mly_user_command(mly, (void *)data);
 2320                 break;
 2321         case MLYIO_HEALTH:
 2322                 rv = mly_user_health(mly, (void *)data);
 2323                 break;
 2324         default:
 2325                 rv = ENOTTY;
 2326                 break;
 2327         }
 2328 
 2329         return (rv);
 2330 }
 2331 
 2332 /*
 2333  * Execute a command passed in from userspace.
 2334  *
 2335  * The control structure contains the actual command for the controller, as
 2336  * well as the user-space data pointer and data size, and an optional sense
 2337  * buffer size/pointer.  On completion, the data size is adjusted to the
 2338  * command residual, and the sense buffer size to the size of the returned
 2339  * sense data.
 2340  */
 2341 static int
 2342 mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
 2343 {
 2344         struct mly_ccb  *mc;
 2345         int rv, mapped;
 2346 
 2347         if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
 2348                 return (rv);
 2349 
 2350         mapped = 0;
 2351         mc->mc_data = NULL;
 2352 
 2353         /*
 2354          * Handle data size/direction.
 2355          */
 2356         if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
 2357                 if (mc->mc_length > MAXPHYS) {
 2358                         rv = EINVAL;
 2359                         goto out;
 2360                 }
 2361 
 2362                 mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
 2363                 if (mc->mc_data == NULL) {
 2364                         rv = ENOMEM;
 2365                         goto out;
 2366                 }
 2367 
 2368                 if (uc->DataTransferLength > 0) {
 2369                         mc->mc_flags |= MLY_CCB_DATAIN;
 2370                         memset(mc->mc_data, 0, mc->mc_length);
 2371                 }
 2372         
 2373                 if (uc->DataTransferLength < 0) {
 2374                         mc->mc_flags |= MLY_CCB_DATAOUT;
 2375                         rv = copyin(uc->DataTransferBuffer, mc->mc_data,
 2376                             mc->mc_length);
 2377                         if (rv != 0)
 2378                                 goto out;
 2379                 }
 2380 
 2381                 if ((rv = mly_ccb_map(mly, mc)) != 0)
 2382                         goto out;
 2383                 mapped = 1;
 2384         }
 2385 
 2386         /* Copy in the command and execute it. */
 2387         memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));
 2388 
 2389         if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
 2390                 goto out;
 2391 
 2392         /* Return the data to userspace. */
 2393         if (uc->DataTransferLength > 0) {
 2394                 rv = copyout(mc->mc_data, uc->DataTransferBuffer,
 2395                     mc->mc_length);
 2396                 if (rv != 0)
 2397                         goto out;
 2398         }
 2399         
 2400         /* Return the sense buffer to userspace. */
 2401         if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
 2402                 rv = copyout(mc->mc_packet, uc->RequestSenseBuffer, 
 2403                     min(uc->RequestSenseLength, mc->mc_sense));
 2404                 if (rv != 0)
 2405                         goto out;
 2406         }
 2407 
 2408         /* Return command results to userspace (caller will copy out). */
 2409         uc->DataTransferLength = mc->mc_resid;
 2410         uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
 2411         uc->CommandStatus = mc->mc_status;
 2412         rv = 0;
 2413 
 2414  out:
 2415         if (mapped)
 2416                 mly_ccb_unmap(mly, mc);
 2417         if (mc->mc_data != NULL)
 2418                 free(mc->mc_data, M_DEVBUF);
 2419         if (mc != NULL)
 2420                 mly_ccb_free(mly, mc);
 2421 
 2422         return (rv);
 2423 }
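
      /*
       * A hypothetical userland sketch of driving this path through the
       * MLYIO_COMMAND ioctl (the device node name is an assumption, and
       * error handling is omitted):
       *
       *      struct mly_user_command uc;
       *      int fd;
       *
       *      memset(&uc, 0, sizeof(uc));
       *      ...fill in uc.CommandMailbox...
       *      uc.DataTransferBuffer = buf;
       *      uc.DataTransferLength = len;    (positive: data returned to
       *                                       userspace; negative: data
       *                                       sent to the controller)
       *      fd = open("/dev/mly0", O_RDWR); (node name assumed)
       *      ioctl(fd, MLYIO_COMMAND, &uc);
       *      ...uc.CommandStatus, uc.DataTransferLength and
       *         uc.RequestSenseLength now reflect the result...
       */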
 2424 
 2425 /*
 2426  * Return health status to userspace.  If the health change index in the
 2427  * user structure does not match that currently exported by the controller,
 2428  * we return the current status immediately.  Otherwise, we block until
 2429  * either interrupted or new status is delivered.
 2430  */
 2431 static int
 2432 mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
 2433 {
 2434         struct mly_health_status mh;
 2435         int rv, s;
 2436         
 2437         /* Fetch the current health status from userspace. */
 2438         rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
 2439         if (rv != 0)
 2440                 return (rv);
 2441 
 2442         /* Block until a status update arrives or we are interrupted. */
 2443         s = splbio();
 2444         if (mly->mly_event_change == mh.change_counter)
 2445                 rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
 2446                     "mlyhealth", 0);
 2447         splx(s);
 2448 
 2449         if (rv == 0) {
 2450                 /*
 2451                  * Copy the controller's health status buffer out (there is
 2452                  * a race here if it changes again).
 2453                  */
 2454                 rv = copyout(&mly->mly_mmbox->mmm_health.status,
 2455                     uh->HealthStatusBuffer, sizeof(uh->HealthStatusBuffer));
 2456         }
 2457 
 2458         return (rv);
 2459 }
