FreeBSD/Linux Kernel Cross Reference
sys/dev/mps/mps.c


    1 /*-
    2  * Copyright (c) 2009 Yahoo! Inc.
    3  * Copyright (c) 2012 LSI Corp.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  * LSI MPT-Fusion Host Adapter FreeBSD
   28  *
   29  * $FreeBSD: releng/8.4/sys/dev/mps/mps.c 237877 2012-07-01 05:23:59Z ken $
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/8.4/sys/dev/mps/mps.c 237877 2012-07-01 05:23:59Z ken $");
   34 
   35 /* Communications core for LSI MPT2 */
   36 
   37 /* TODO Move headers to mpsvar */
   38 #include <sys/types.h>
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/kernel.h>
   42 #include <sys/selinfo.h>
   43 #include <sys/lock.h>
   44 #include <sys/mutex.h>
   45 #include <sys/module.h>
   46 #include <sys/bus.h>
   47 #include <sys/conf.h>
   48 #include <sys/bio.h>
   49 #include <sys/malloc.h>
   50 #include <sys/uio.h>
   51 #include <sys/sysctl.h>
   52 #include <sys/queue.h>
   53 #include <sys/kthread.h>
   54 #include <sys/endian.h>
   55 #include <sys/eventhandler.h>
   56 
   57 #include <machine/bus.h>
   58 #include <machine/resource.h>
   59 #include <sys/rman.h>
   60 #include <sys/proc.h>
   61 
   62 #include <dev/pci/pcivar.h>
   63 
   64 #include <cam/scsi/scsi_all.h>
   65 
   66 #include <dev/mps/mpi/mpi2_type.h>
   67 #include <dev/mps/mpi/mpi2.h>
   68 #include <dev/mps/mpi/mpi2_ioc.h>
   69 #include <dev/mps/mpi/mpi2_sas.h>
   70 #include <dev/mps/mpi/mpi2_cnfg.h>
   71 #include <dev/mps/mpi/mpi2_init.h>
   72 #include <dev/mps/mpi/mpi2_tool.h>
   73 #include <dev/mps/mps_ioctl.h>
   74 #include <dev/mps/mpsvar.h>
   75 #include <dev/mps/mps_table.h>
   76 
   77 static int mps_diag_reset(struct mps_softc *sc, int sleep_flag);
   78 static int mps_init_queues(struct mps_softc *sc);
   79 static int mps_message_unit_reset(struct mps_softc *sc, int sleep_flag);
   80 static int mps_transition_operational(struct mps_softc *sc);
   81 static void mps_startup(void *arg);
   82 static int mps_send_iocinit(struct mps_softc *sc);
   83 static int mps_attach_log(struct mps_softc *sc);
   84 static __inline void mps_complete_command(struct mps_command *cm);
   85 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
   86     MPI2_EVENT_NOTIFICATION_REPLY *reply);
   87 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
   88 static void mps_periodic(void *);
   89 static int mps_reregister_events(struct mps_softc *sc);
   90 static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm);
   91 static int mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag);
   92 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters");
   93 
   94 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
   95 
   96 /*
   97  * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
   98  * any state and back to its initialization state machine.
   99  */
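       /*
        * The bytes below are written one at a time to the WriteSequence
        * register by mps_diag_reset(); once the chip has seen the whole
        * sequence it sets MPI2_DIAG_DIAG_WRITE_ENABLE in the host
        * diagnostic register, which is what the reset loop polls for.
        */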
  100 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
  101 
   102 /* This union was added to smoothly convert cm->cm_desc.Words via le64toh.
   103  * The compiler only supports passing a uint64_t as the argument;
   104  * otherwise it will throw the following error:
   105  * "aggregate value used where an integer was expected"
   106  */
  107 
  108 typedef union _reply_descriptor {
  109         u64 word;
  110         struct {
  111                 u32 low;
  112                 u32 high;
  113         } u;
   114 } reply_descriptor, address_descriptor;
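       /*
        * mps_enqueue_request() below fills u.low and u.high from the
        * command's descriptor words, byte-swaps the combined 64-bit value
        * with htole64(), and then writes the two halves to the request
        * descriptor post registers.
        */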
  115 
   116 /*
   117  * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
   118  * If this function is called from process context, it can sleep and
   119  * there is no harm in sleeping.  If it is called from an interrupt
   120  * handler, it cannot sleep and the NO_SLEEP flag must be set.
   121  * Based on the sleep flag, the driver will call msleep, pause or DELAY.
   122  * msleep and pause are similar, but pause is used when mps_mtx
   123  * is not held by the driver.
   124  *
   125  */
  126 static int
  127 mps_diag_reset(struct mps_softc *sc,int sleep_flag)
  128 {
  129         uint32_t reg;
  130         int i, error, tries = 0;
  131 
  132         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  133 
  134         /* Clear any pending interrupts */
  135         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  136 
   137         /* Force NO_SLEEP for threads that are prohibited from sleeping,
   138          * e.g. threads running in an interrupt handler.
   139          */
  140         if(curthread->td_pflags & TDP_NOSLEEPING)
  141                 sleep_flag = NO_SLEEP;
  142  
  143         /* Push the magic sequence */
  144         error = ETIMEDOUT;
  145         while (tries++ < 20) {
  146                 for (i = 0; i < sizeof(mpt2_reset_magic); i++)
  147                         mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
  148                             mpt2_reset_magic[i]);
  149                 /* wait 100 msec */
  150                 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
  151                         msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdiag", hz/10);
  152                 else if (sleep_flag == CAN_SLEEP)
  153                         pause("mpsdiag", hz/10);
  154                 else
  155                         DELAY(100 * 1000);
  156 
  157                 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
  158                 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
  159                         error = 0;
  160                         break;
  161                 }
  162         }
  163         if (error)
  164                 return (error);
  165 
  166         /* Send the actual reset.  XXX need to refresh the reg? */
  167         mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
  168             reg | MPI2_DIAG_RESET_ADAPTER);
  169 
  170         /* Wait up to 300 seconds in 50ms intervals */
  171         error = ETIMEDOUT;
  172         for (i = 0; i < 60000; i++) {
  173                 /* wait 50 msec */
  174                 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
  175                         msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdiag", hz/20);
  176                 else if (sleep_flag == CAN_SLEEP)
  177                         pause("mpsdiag", hz/20);
  178                 else
  179                         DELAY(50 * 1000);
  180                 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  181                 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
  182                         error = 0;
  183                         break;
  184                 }
  185         }
  186         if (error)
  187                 return (error);
  188 
  189         mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
  190 
  191         return (0);
  192 }
  193 
  194 static int
  195 mps_message_unit_reset(struct mps_softc *sc, int sleep_flag)
  196 {
  197 
  198         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  199 
  200         mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
  201             MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
  202             MPI2_DOORBELL_FUNCTION_SHIFT);
  203 
  204         if (mps_wait_db_ack(sc, 5, sleep_flag) != 0) {
  205                 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed : <%s>\n",
  206                                 __func__);
  207                 return (ETIMEDOUT);
  208         }
  209 
  210         return (0);
  211 }
  212 
  213 static int
  214 mps_transition_ready(struct mps_softc *sc)
  215 {
  216         uint32_t reg, state;
  217         int error, tries = 0;
  218         int sleep_flags;
  219 
  220         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  221         /* If we are in attach call, do not sleep */
  222         sleep_flags = (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
  223                                         ? CAN_SLEEP:NO_SLEEP;
  224         error = 0;
  225         while (tries++ < 5) {
  226                 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  227                 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
  228 
  229                 /*
  230                  * Ensure the IOC is ready to talk.  If it's not, try
  231                  * resetting it.
  232                  */
  233                 if (reg & MPI2_DOORBELL_USED) {
  234                         mps_diag_reset(sc, sleep_flags);
  235                         DELAY(50000);
  236                         continue;
  237                 }
  238 
  239                 /* Is the adapter owned by another peer? */
  240                 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
  241                     (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
  242                         device_printf(sc->mps_dev, "IOC is under the control "
  243                             "of another peer host, aborting initialization.\n");
  244                         return (ENXIO);
  245                 }
  246                 
  247                 state = reg & MPI2_IOC_STATE_MASK;
  248                 if (state == MPI2_IOC_STATE_READY) {
  249                         /* Ready to go! */
  250                         error = 0;
  251                         break;
  252                 } else if (state == MPI2_IOC_STATE_FAULT) {
  253                         mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n",
  254                             state & MPI2_DOORBELL_FAULT_CODE_MASK);
  255                         mps_diag_reset(sc, sleep_flags);
  256                 } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
  257                         /* Need to take ownership */
  258                         mps_message_unit_reset(sc, sleep_flags);
  259                 } else if (state == MPI2_IOC_STATE_RESET) {
  260                         /* Wait a bit, IOC might be in transition */
  261                         mps_dprint(sc, MPS_FAULT,
  262                             "IOC in unexpected reset state\n");
  263                 } else {
  264                         mps_dprint(sc, MPS_FAULT,
  265                             "IOC in unknown state 0x%x\n", state);
  266                         error = EINVAL;
  267                         break;
  268                 }
  269         
  270                 /* Wait 50ms for things to settle down. */
  271                 DELAY(50000);
  272         }
  273 
  274         if (error)
  275                 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n");
  276 
  277         return (error);
  278 }
  279 
  280 static int
  281 mps_transition_operational(struct mps_softc *sc)
  282 {
  283         uint32_t reg, state;
  284         int error;
  285 
  286         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  287 
  288         error = 0;
  289         reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  290         mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
  291 
  292         state = reg & MPI2_IOC_STATE_MASK;
  293         if (state != MPI2_IOC_STATE_READY) {
  294                 if ((error = mps_transition_ready(sc)) != 0) {
  295                         mps_dprint(sc, MPS_FAULT, 
  296                             "%s failed to transition ready\n", __func__);
  297                         return (error);
  298                 }
  299         }
  300 
  301         error = mps_send_iocinit(sc);
  302         return (error);
  303 }
  304 
  305 /* 
  306  * XXX Some of this should probably move to mps.c
  307  *
  308  * The terms diag reset and hard reset are used interchangeably in the MPI
  309  * docs to mean resetting the controller chip.  In this code diag reset
  310  * cleans everything up, and the hard reset function just sends the reset
  311  * sequence to the chip.  This should probably be refactored so that every
  312  * subsystem gets a reset notification of some sort, and can clean up
  313  * appropriately.
  314  */
  315 int
  316 mps_reinit(struct mps_softc *sc)
  317 {
  318         int error;
  319         uint32_t db;
  320 
  321         mps_printf(sc, "%s sc %p\n", __func__, sc);
  322 
  323         mtx_assert(&sc->mps_mtx, MA_OWNED);
  324 
  325         if (sc->mps_flags & MPS_FLAGS_DIAGRESET) {
  326                 mps_printf(sc, "%s reset already in progress\n", __func__);
  327                 return 0;
  328         }
  329 
  330         /* make sure the completion callbacks can recognize they're getting
  331          * a NULL cm_reply due to a reset.
  332          */
  333         sc->mps_flags |= MPS_FLAGS_DIAGRESET;
  334 
  335         mps_printf(sc, "%s mask interrupts\n", __func__);
  336         mps_mask_intr(sc);
  337 
  338         error = mps_diag_reset(sc, CAN_SLEEP);
  339         if (error != 0) {
  340                 panic("%s hard reset failed with error %d\n",
  341                     __func__, error);
  342         }
  343 
  344         /* Restore the PCI state, including the MSI-X registers */
  345         mps_pci_restore(sc);
  346 
  347         /* Give the I/O subsystem special priority to get itself prepared */
  348         mpssas_handle_reinit(sc);
  349 
  350         /* reinitialize queues after the reset */
  351         bzero(sc->free_queue, sc->fqdepth * 4);
  352         mps_init_queues(sc);
  353 
  354         /* get the chip out of the reset state */
  355         error = mps_transition_operational(sc);
  356         if (error != 0)
  357                 panic("%s transition operational failed with error %d\n",
  358                     __func__, error);
  359 
  360         /* Reinitialize the reply queue. This is delicate because this
  361          * function is typically invoked by task mgmt completion callbacks,
  362          * which are called by the interrupt thread.  We need to make sure
  363          * the interrupt handler loop will exit when we return to it, and
  364          * that it will recognize the indexes we've changed.
  365          */
  366         sc->replypostindex = 0;
  367         mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
  368         mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex);
  369 
  370         db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  371         mps_printf(sc, "%s doorbell 0x%08x\n", __func__, db);
  372 
  373         mps_printf(sc, "%s unmask interrupts post %u free %u\n", __func__,
  374             sc->replypostindex, sc->replyfreeindex);
  375 
  376         mps_unmask_intr(sc);
  377 
  378         mps_printf(sc, "%s restarting post %u free %u\n", __func__,
  379             sc->replypostindex, sc->replyfreeindex);
  380 
  381         /* restart will reload the event masks clobbered by the reset, and
  382          * then enable the port.
  383          */
  384         mps_reregister_events(sc);
  385 
  386         /* the end of discovery will release the simq, so we're done. */
  387         mps_printf(sc, "%s finished sc %p post %u free %u\n", 
  388             __func__, sc, 
  389             sc->replypostindex, sc->replyfreeindex);
  390 
  391         sc->mps_flags &= ~MPS_FLAGS_DIAGRESET;
  392 
  393         return 0;
  394 }
  395 
   396 /* Wait for the chip to ACK a word that we've put into its FIFO.
   397  * Wait up to <timeout> seconds.  Each iteration of the loop busy-waits
   398  * for 500 microseconds.
   399  * The total is [ 0.5 * (2000 * <timeout>) ] milliseconds.
   400  */
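       /*
        * For example, with timeout = 5: CAN_SLEEP gives 1000 * 5 = 5000
        * iterations of ~1 ms sleeps, and NO_SLEEP gives 2000 * 5 = 10000
        * iterations of 500 us busy-waits; either way roughly 5 seconds.
        */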
  401 static int
  402 mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag)
  403 {
  404 
  405         u32 cntdn, count;
  406         u32 int_status;
  407         u32 doorbell;
  408 
  409         count = 0;
  410         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
  411         do {
  412                 int_status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
  413                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
   414                         mps_dprint(sc, MPS_INFO,
   415                             "%s: successful count(%d), timeout(%d)\n",
   416                             __func__, count, timeout);
   417                         return (0);
  418                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
  419                         doorbell = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  420                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
  421                                 MPI2_IOC_STATE_FAULT) {
  422                                 mps_dprint(sc, MPS_FAULT, 
  423                                         "fault_state(0x%04x)!\n", doorbell);
  424                                 return (EFAULT);
  425                         }
  426                 } else if (int_status == 0xFFFFFFFF)
  427                         goto out;
  428 
   429                 /* If it can sleep, sleep for 1 millisecond, else busy-wait
   430                  * for 0.5 milliseconds. */
  431                 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
  432                         msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, 
  433                         "mpsdba", hz/1000);
  434                 else if (sleep_flag == CAN_SLEEP)
  435                         pause("mpsdba", hz/1000);
  436                 else
  437                         DELAY(500);
  438                 count++;
  439         } while (--cntdn);
  440 
  441         out:
  442         mps_dprint(sc, MPS_FAULT, "%s: failed due to timeout count(%d), "
  443                 "int_status(%x)!\n", __func__, count, int_status);
  444         return (ETIMEDOUT);
  445 
  446 }
  447 
  448 /* Wait for the chip to signal that the next word in its FIFO can be fetched */
  449 static int
  450 mps_wait_db_int(struct mps_softc *sc)
  451 {
  452         int retry;
  453 
  454         for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
  455                 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
  456                     MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
  457                         return (0);
  458                 DELAY(2000);
  459         }
  460         return (ETIMEDOUT);
  461 }
  462 
  463 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
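       /*
        * Outline of the handshake implemented below:
        *   1. Clear any pending doorbell interrupt status.
        *   2. Fail with EBUSY if the doorbell is already in use.
        *   3. Announce the handshake function and request length in dwords.
        *   4. Wait for the IOC to acknowledge and take the doorbell.
        *   5. Clock the request out one 32-bit dword at a time.
        *   6. Clock the reply back in 16 bits at a time, discarding any
        *      words that do not fit in the caller's reply buffer.
        *   7. Wait for the IOC to release the doorbell.
        */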
  464 static int
  465 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
  466     int req_sz, int reply_sz, int timeout)
  467 {
  468         uint32_t *data32;
  469         uint16_t *data16;
  470         int i, count, ioc_sz, residual;
  471         int sleep_flags = CAN_SLEEP;
  472         
  473         if(curthread->td_pflags & TDP_NOSLEEPING)
  474                 sleep_flags = NO_SLEEP;
  475 
  476         /* Step 1 */
  477         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  478 
  479         /* Step 2 */
  480         if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
  481                 return (EBUSY);
  482 
  483         /* Step 3
  484          * Announce that a message is coming through the doorbell.  Messages
  485          * are pushed at 32bit words, so round up if needed.
  486          */
  487         count = (req_sz + 3) / 4;
  488         mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
  489             (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
  490             (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
  491 
  492         /* Step 4 */
  493         if (mps_wait_db_int(sc) ||
  494             (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
  495                 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n");
  496                 return (ENXIO);
  497         }
  498         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  499         if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
  500                 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n");
  501                 return (ENXIO);
  502         }
  503 
  504         /* Step 5 */
   505         /* Clock out the message data synchronously in 32-bit dwords. */
  506         data32 = (uint32_t *)req;
  507         for (i = 0; i < count; i++) {
  508                 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
  509                 if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
  510                         mps_dprint(sc, MPS_FAULT,
  511                             "Timeout while writing doorbell\n");
  512                         return (ENXIO);
  513                 }
  514         }
  515 
  516         /* Step 6 */
  517         /* Clock in the reply in 16-bit words.  The total length of the
   518          * message is always in the 4th byte, so clock in the first 2 words
  519          * manually, then loop the rest.
  520          */
  521         data16 = (uint16_t *)reply;
  522         if (mps_wait_db_int(sc) != 0) {
  523                 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n");
  524                 return (ENXIO);
  525         }
  526         data16[0] =
  527             mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
  528         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  529         if (mps_wait_db_int(sc) != 0) {
  530                 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n");
  531                 return (ENXIO);
  532         }
  533         data16[1] =
  534             mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
  535         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  536 
  537         /* Number of 32bit words in the message */
  538         ioc_sz = reply->MsgLength;
  539 
  540         /*
  541          * Figure out how many 16bit words to clock in without overrunning.
  542          * The precision loss with dividing reply_sz can safely be
  543          * ignored because the messages can only be multiples of 32bits.
  544          */
  545         residual = 0;
  546         count = MIN((reply_sz / 4), ioc_sz) * 2;
  547         if (count < ioc_sz * 2) {
  548                 residual = ioc_sz * 2 - count;
  549                 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d "
  550                     "residual message words\n", residual);
  551         }
  552 
  553         for (i = 2; i < count; i++) {
  554                 if (mps_wait_db_int(sc) != 0) {
  555                         mps_dprint(sc, MPS_FAULT,
  556                             "Timeout reading doorbell %d\n", i);
  557                         return (ENXIO);
  558                 }
  559                 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) &
  560                     MPI2_DOORBELL_DATA_MASK;
  561                 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  562         }
  563 
  564         /*
  565          * Pull out residual words that won't fit into the provided buffer.
  566          * This keeps the chip from hanging due to a driver programming
  567          * error.
  568          */
  569         while (residual--) {
  570                 if (mps_wait_db_int(sc) != 0) {
  571                         mps_dprint(sc, MPS_FAULT,
  572                             "Timeout reading doorbell\n");
  573                         return (ENXIO);
  574                 }
  575                 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET);
  576                 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  577         }
  578 
  579         /* Step 7 */
  580         if (mps_wait_db_int(sc) != 0) {
  581                 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n");
  582                 return (ENXIO);
  583         }
  584         if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
  585                 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n");
  586         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  587 
  588         return (0);
  589 }
  590 
  591 static void
  592 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
  593 {
  594         reply_descriptor rd;
  595         mps_dprint(sc, MPS_TRACE, "%s SMID %u cm %p ccb %p\n", __func__,
  596             cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
  597 
  598         if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE && !(sc->mps_flags & MPS_FLAGS_SHUTDOWN))
  599                 mtx_assert(&sc->mps_mtx, MA_OWNED);
  600 
  601         if (++sc->io_cmds_active > sc->io_cmds_highwater)
  602                 sc->io_cmds_highwater++;
  603         rd.u.low = cm->cm_desc.Words.Low;
  604         rd.u.high = cm->cm_desc.Words.High;
  605         rd.word = htole64(rd.word);
  606         /* TODO-We may need to make below regwrite atomic */
  607         mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
  608             rd.u.low);
  609         mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
  610             rd.u.high);
  611 }
  612 
  613 /*
  614  * Just the FACTS, ma'am.
  615  */
  616 static int
  617 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
  618 {
  619         MPI2_DEFAULT_REPLY *reply;
  620         MPI2_IOC_FACTS_REQUEST request;
  621         int error, req_sz, reply_sz;
  622 
  623         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  624 
  625         req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
  626         reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
  627         reply = (MPI2_DEFAULT_REPLY *)facts;
  628 
  629         bzero(&request, req_sz);
  630         request.Function = MPI2_FUNCTION_IOC_FACTS;
  631         error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
  632 
  633         return (error);
  634 }
  635 
  636 static int
  637 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port)
  638 {
  639         MPI2_PORT_FACTS_REQUEST *request;
  640         MPI2_PORT_FACTS_REPLY *reply;
  641         struct mps_command *cm;
  642         int error;
  643 
  644         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  645 
  646         if ((cm = mps_alloc_command(sc)) == NULL)
  647                 return (EBUSY);
  648         request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req;
  649         request->Function = MPI2_FUNCTION_PORT_FACTS;
  650         request->PortNumber = port;
  651         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  652         cm->cm_data = NULL;
  653         error = mps_request_polled(sc, cm);
  654         reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply;
  655         if (reply == NULL) {
  656                 mps_printf(sc, "%s NULL reply\n", __func__);
  657                 goto done;
  658         }
  659         if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) {
  660                 mps_printf(sc, 
  661                     "%s error %d iocstatus 0x%x iocloginfo 0x%x type 0x%x\n",
  662                     __func__, error, reply->IOCStatus, reply->IOCLogInfo, 
  663                     reply->PortType);
  664                 error = ENXIO;
  665         }
  666         bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY));
  667 done:
  668         mps_free_command(sc, cm);
  669 
  670         return (error);
  671 }
  672 
  673 static int
  674 mps_send_iocinit(struct mps_softc *sc)
  675 {
  676         MPI2_IOC_INIT_REQUEST   init;
  677         MPI2_DEFAULT_REPLY      reply;
  678         int req_sz, reply_sz, error;
  679 
  680         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  681 
  682         req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
  683         reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
  684         bzero(&init, req_sz);
  685         bzero(&reply, reply_sz);
  686 
  687         /*
  688          * Fill in the init block.  Note that most addresses are
  689          * deliberately in the lower 32bits of memory.  This is a micro-
   690          * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
  691          */
  692         init.Function = MPI2_FUNCTION_IOC_INIT;
  693         init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
  694         init.MsgVersion = htole16(MPI2_VERSION);
  695         init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
  696         init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
  697         init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
  698         init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
  699         init.SenseBufferAddressHigh = 0;
  700         init.SystemReplyAddressHigh = 0;
  701         init.SystemRequestFrameBaseAddress.High = 0;
  702         init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr);
  703         init.ReplyDescriptorPostQueueAddress.High = 0;
  704         init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr);
  705         init.ReplyFreeQueueAddress.High = 0;
  706         init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
  707         init.TimeStamp.High = 0;
  708         init.TimeStamp.Low = htole32((uint32_t)time_uptime);
  709 
  710         error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
  711         if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
  712                 error = ENXIO;
  713 
  714         mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus);
  715         return (error);
  716 }
  717 
  718 void
  719 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  720 {
  721         bus_addr_t *addr;
  722 
  723         addr = arg;
  724         *addr = segs[0].ds_addr;
  725 }
  726 
  727 static int
  728 mps_alloc_queues(struct mps_softc *sc)
  729 {
  730         bus_addr_t queues_busaddr;
  731         uint8_t *queues;
  732         int qsize, fqsize, pqsize;
  733 
  734         /*
  735          * The reply free queue contains 4 byte entries in multiples of 16 and
  736          * aligned on a 16 byte boundary. There must always be an unused entry.
  737          * This queue supplies fresh reply frames for the firmware to use.
  738          *
  739          * The reply descriptor post queue contains 8 byte entries in
  740          * multiples of 16 and aligned on a 16 byte boundary.  This queue
  741          * contains filled-in reply frames sent from the firmware to the host.
  742          *
  743          * These two queues are allocated together for simplicity.
  744          */
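                /*
                 * As a sizing illustration (hypothetical numbers): if
                 * num_replies were 1023, both depths round up to 1024
                 * entries, giving a 4KB free queue followed immediately by
                 * an 8KB post queue within the single allocation below.
                 */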
  745         sc->fqdepth = roundup2((sc->num_replies + 1), 16);
  746         sc->pqdepth = roundup2((sc->num_replies + 1), 16);
  747         fqsize= sc->fqdepth * 4;
  748         pqsize = sc->pqdepth * 8;
  749         qsize = fqsize + pqsize;
  750 
  751         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  752                                 16, 0,                  /* algnmnt, boundary */
  753                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  754                                 BUS_SPACE_MAXADDR,      /* highaddr */
  755                                 NULL, NULL,             /* filter, filterarg */
  756                                 qsize,                  /* maxsize */
  757                                 1,                      /* nsegments */
  758                                 qsize,                  /* maxsegsize */
  759                                 0,                      /* flags */
  760                                 NULL, NULL,             /* lockfunc, lockarg */
  761                                 &sc->queues_dmat)) {
  762                 device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n");
  763                 return (ENOMEM);
  764         }
  765         if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
  766             &sc->queues_map)) {
  767                 device_printf(sc->mps_dev, "Cannot allocate queues memory\n");
  768                 return (ENOMEM);
  769         }
  770         bzero(queues, qsize);
  771         bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
  772             mps_memaddr_cb, &queues_busaddr, 0);
  773 
  774         sc->free_queue = (uint32_t *)queues;
  775         sc->free_busaddr = queues_busaddr;
  776         sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
  777         sc->post_busaddr = queues_busaddr + fqsize;
  778 
  779         return (0);
  780 }
  781 
  782 static int
  783 mps_alloc_replies(struct mps_softc *sc)
  784 {
  785         int rsize, num_replies;
  786 
  787         /*
  788          * sc->num_replies should be one less than sc->fqdepth.  We need to
  789          * allocate space for sc->fqdepth replies, but only sc->num_replies
  790          * replies can be used at once.
  791          */
  792         num_replies = max(sc->fqdepth, sc->num_replies);
  793 
  794         rsize = sc->facts->ReplyFrameSize * num_replies * 4; 
  795         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  796                                 4, 0,                   /* algnmnt, boundary */
  797                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  798                                 BUS_SPACE_MAXADDR,      /* highaddr */
  799                                 NULL, NULL,             /* filter, filterarg */
  800                                 rsize,                  /* maxsize */
  801                                 1,                      /* nsegments */
  802                                 rsize,                  /* maxsegsize */
  803                                 0,                      /* flags */
  804                                 NULL, NULL,             /* lockfunc, lockarg */
  805                                 &sc->reply_dmat)) {
  806                 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n");
  807                 return (ENOMEM);
  808         }
  809         if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
  810             BUS_DMA_NOWAIT, &sc->reply_map)) {
  811                 device_printf(sc->mps_dev, "Cannot allocate replies memory\n");
  812                 return (ENOMEM);
  813         }
  814         bzero(sc->reply_frames, rsize);
  815         bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
  816             mps_memaddr_cb, &sc->reply_busaddr, 0);
  817 
  818         return (0);
  819 }
  820 
  821 static int
  822 mps_alloc_requests(struct mps_softc *sc)
  823 {
  824         struct mps_command *cm;
  825         struct mps_chain *chain;
  826         int i, rsize, nsegs;
  827 
  828         rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
  829         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  830                                 16, 0,                  /* algnmnt, boundary */
  831                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  832                                 BUS_SPACE_MAXADDR,      /* highaddr */
  833                                 NULL, NULL,             /* filter, filterarg */
  834                                 rsize,                  /* maxsize */
  835                                 1,                      /* nsegments */
  836                                 rsize,                  /* maxsegsize */
  837                                 0,                      /* flags */
  838                                 NULL, NULL,             /* lockfunc, lockarg */
  839                                 &sc->req_dmat)) {
  840                 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n");
  841                 return (ENOMEM);
  842         }
  843         if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
  844             BUS_DMA_NOWAIT, &sc->req_map)) {
  845                 device_printf(sc->mps_dev, "Cannot allocate request memory\n");
  846                 return (ENOMEM);
  847         }
  848         bzero(sc->req_frames, rsize);
  849         bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
  850             mps_memaddr_cb, &sc->req_busaddr, 0);
  851 
  852         rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4;
  853         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  854                                 16, 0,                  /* algnmnt, boundary */
  855                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  856                                 BUS_SPACE_MAXADDR,      /* highaddr */
  857                                 NULL, NULL,             /* filter, filterarg */
  858                                 rsize,                  /* maxsize */
  859                                 1,                      /* nsegments */
  860                                 rsize,                  /* maxsegsize */
  861                                 0,                      /* flags */
  862                                 NULL, NULL,             /* lockfunc, lockarg */
  863                                 &sc->chain_dmat)) {
  864                 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n");
  865                 return (ENOMEM);
  866         }
  867         if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
  868             BUS_DMA_NOWAIT, &sc->chain_map)) {
  869                 device_printf(sc->mps_dev, "Cannot allocate chain memory\n");
  870                 return (ENOMEM);
  871         }
  872         bzero(sc->chain_frames, rsize);
  873         bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
  874             mps_memaddr_cb, &sc->chain_busaddr, 0);
  875 
  876         rsize = MPS_SENSE_LEN * sc->num_reqs;
  877         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  878                                 1, 0,                   /* algnmnt, boundary */
  879                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  880                                 BUS_SPACE_MAXADDR,      /* highaddr */
  881                                 NULL, NULL,             /* filter, filterarg */
  882                                 rsize,                  /* maxsize */
  883                                 1,                      /* nsegments */
  884                                 rsize,                  /* maxsegsize */
  885                                 0,                      /* flags */
  886                                 NULL, NULL,             /* lockfunc, lockarg */
  887                                 &sc->sense_dmat)) {
  888                 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n");
  889                 return (ENOMEM);
  890         }
  891         if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
  892             BUS_DMA_NOWAIT, &sc->sense_map)) {
  893                 device_printf(sc->mps_dev, "Cannot allocate sense memory\n");
  894                 return (ENOMEM);
  895         }
  896         bzero(sc->sense_frames, rsize);
  897         bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
  898             mps_memaddr_cb, &sc->sense_busaddr, 0);
  899 
  900         sc->chains = malloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2,
  901             M_WAITOK | M_ZERO);
  902         if(!sc->chains) {
  903                 device_printf(sc->mps_dev, 
  904                 "Cannot allocate chains memory %s %d\n",
  905                  __func__, __LINE__);
  906                 return (ENOMEM);
  907         }
  908         for (i = 0; i < sc->max_chains; i++) {
  909                 chain = &sc->chains[i];
  910                 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
  911                     i * sc->facts->IOCRequestFrameSize * 4);
  912                 chain->chain_busaddr = sc->chain_busaddr +
  913                     i * sc->facts->IOCRequestFrameSize * 4;
  914                 mps_free_chain(sc, chain);
  915                 sc->chain_free_lowwater++;
  916         }
  917 
  918         /* XXX Need to pick a more precise value */
  919         nsegs = (MAXPHYS / PAGE_SIZE) + 1;
  920         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  921                                 1, 0,                   /* algnmnt, boundary */
  922                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  923                                 BUS_SPACE_MAXADDR,      /* highaddr */
  924                                 NULL, NULL,             /* filter, filterarg */
  925                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  926                                 nsegs,                  /* nsegments */
  927                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  928                                 BUS_DMA_ALLOCNOW,       /* flags */
  929                                 busdma_lock_mutex,      /* lockfunc */
  930                                 &sc->mps_mtx,           /* lockarg */
  931                                 &sc->buffer_dmat)) {
  932                 device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n");
  933                 return (ENOMEM);
  934         }
  935 
  936         /*
  937          * SMID 0 cannot be used as a free command per the firmware spec.
  938          * Just drop that command instead of risking accounting bugs.
  939          */
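                /*
                 * Hence the loop below starts at i = 1, so commands[0]
                 * (SMID 0) is allocated but never placed on a free list.
                 */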
  940         sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs,
  941             M_MPT2, M_WAITOK | M_ZERO);
  942         if(!sc->commands) {
  943                 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
  944                  __func__, __LINE__);
  945                 return (ENOMEM);
  946         }
  947         for (i = 1; i < sc->num_reqs; i++) {
  948                 cm = &sc->commands[i];
  949                 cm->cm_req = sc->req_frames +
  950                     i * sc->facts->IOCRequestFrameSize * 4;
  951                 cm->cm_req_busaddr = sc->req_busaddr +
  952                     i * sc->facts->IOCRequestFrameSize * 4;
  953                 cm->cm_sense = &sc->sense_frames[i];
  954                 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
  955                 cm->cm_desc.Default.SMID = i;
  956                 cm->cm_sc = sc;
  957                 TAILQ_INIT(&cm->cm_chain_list);
  958                 callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0);
  959 
  960                 /* XXX Is a failure here a critical problem? */
  961                 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
  962                         if (i <= sc->facts->HighPriorityCredit)
  963                                 mps_free_high_priority_command(sc, cm);
  964                         else
  965                                 mps_free_command(sc, cm);
  966                 else {
  967                         panic("failed to allocate command %d\n", i);
  968                         sc->num_reqs = i;
  969                         break;
  970                 }
  971         }
  972 
  973         return (0);
  974 }
  975 
  976 static int
  977 mps_init_queues(struct mps_softc *sc)
  978 {
  979         int i;
  980 
  981         memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
  982 
  983         /*
  984          * According to the spec, we need to use one less reply than we
  985          * have space for on the queue.  So sc->num_replies (the number we
  986          * use) should be less than sc->fqdepth (allocated size).
  987          */
  988         if (sc->num_replies >= sc->fqdepth)
  989                 return (EINVAL);
  990 
  991         /*
  992          * Initialize all of the free queue entries.
  993          */
  994         for (i = 0; i < sc->fqdepth; i++)
  995                 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4);
  996         sc->replyfreeindex = sc->num_replies;
  997 
  998         return (0);
  999 }
 1000 
 1001 /* Get the driver parameter tunables.  Lowest priority are the driver defaults.
 1002  * Next are the global settings, if they exist.  Highest are the per-unit
 1003  * settings, if they exist.
 1004  */
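       /*
        * These are normally set as loader tunables, e.g. in
        * /boot/loader.conf (the values shown are only illustrative):
        *
        *     hw.mps.debug_level="1"
        *     dev.mps.0.max_chains="2048"
        */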
 1005 static void
 1006 mps_get_tunables(struct mps_softc *sc)
 1007 {
 1008         char tmpstr[80];
 1009 
 1010         /* XXX default to some debugging for now */
 1011         sc->mps_debug = MPS_FAULT;
 1012         sc->disable_msix = 0;
 1013         sc->disable_msi = 0;
 1014         sc->max_chains = MPS_CHAIN_FRAMES;
 1015 
 1016         /*
 1017          * Grab the global variables.
 1018          */
 1019         TUNABLE_INT_FETCH("hw.mps.debug_level", &sc->mps_debug);
 1020         TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix);
 1021         TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi);
 1022         TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains);
 1023 
 1024         /* Grab the unit-instance variables */
 1025         snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level",
 1026             device_get_unit(sc->mps_dev));
 1027         TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug);
 1028 
 1029         snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix",
 1030             device_get_unit(sc->mps_dev));
 1031         TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
 1032 
 1033         snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi",
 1034             device_get_unit(sc->mps_dev));
 1035         TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
 1036 
 1037         snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains",
 1038             device_get_unit(sc->mps_dev));
 1039         TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
 1040 }
 1041 
 1042 static void
 1043 mps_setup_sysctl(struct mps_softc *sc)
 1044 {
 1045         struct sysctl_ctx_list  *sysctl_ctx = NULL;
 1046         struct sysctl_oid       *sysctl_tree = NULL;
 1047         char tmpstr[80], tmpstr2[80];
 1048 
 1049         /*
 1050          * Setup the sysctl variable so the user can change the debug level
 1051          * on the fly.
 1052          */
 1053         snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d",
 1054             device_get_unit(sc->mps_dev));
 1055         snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
 1056 
 1057         sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev);
 1058         if (sysctl_ctx != NULL)
 1059                 sysctl_tree = device_get_sysctl_tree(sc->mps_dev);
 1060 
 1061         if (sysctl_tree == NULL) {
 1062                 sysctl_ctx_init(&sc->sysctl_ctx);
 1063                 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
 1064                     SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2,
 1065                     CTLFLAG_RD, 0, tmpstr);
 1066                 if (sc->sysctl_tree == NULL)
 1067                         return;
 1068                 sysctl_ctx = &sc->sysctl_ctx;
 1069                 sysctl_tree = sc->sysctl_tree;
 1070         }
 1071 
 1072         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1073             OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0,
 1074             "mps debug level");
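                /*
                 * debug_level is read/write, so it can also be changed at
                 * runtime, e.g. (illustrative unit number):
                 *     sysctl dev.mps.0.debug_level=1
                 */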
 1075 
 1076         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1077             OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
 1078             "Disable the use of MSI-X interrupts");
 1079 
 1080         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1081             OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
 1082             "Disable the use of MSI interrupts");
 1083 
 1084         SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1085             OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version,
 1086             strlen(sc->fw_version), "firmware version");
 1087 
 1088         SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1089             OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION,
 1090             strlen(MPS_DRIVER_VERSION), "driver version");
 1091 
 1092         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1093             OID_AUTO, "io_cmds_active", CTLFLAG_RD,
 1094             &sc->io_cmds_active, 0, "number of currently active commands");
 1095 
 1096         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1097             OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
 1098             &sc->io_cmds_highwater, 0, "maximum active commands seen");
 1099 
 1100         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1101             OID_AUTO, "chain_free", CTLFLAG_RD,
 1102             &sc->chain_free, 0, "number of free chain elements");
 1103 
 1104         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1105             OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
 1106             &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
 1107 
 1108         SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1109             OID_AUTO, "max_chains", CTLFLAG_RD,
 1110             &sc->max_chains, 0,"maximum chain frames that will be allocated");
 1111 
 1112 #if __FreeBSD_version >= 900030
 1113         SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
 1114             OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
 1115             &sc->chain_alloc_fail, "chain allocation failures");
 1116 #endif //FreeBSD_version >= 900030
 1117 }
 1118 
 1119 int
 1120 mps_attach(struct mps_softc *sc)
 1121 {
 1122         int i, error;
 1123 
 1124         mps_get_tunables(sc);
 1125 
 1126         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1127 
 1128         mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
 1129         callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
 1130         TAILQ_INIT(&sc->event_list);
 1131 
 1132         if ((error = mps_transition_ready(sc)) != 0) {
 1133                 mps_printf(sc, "%s failed to transition ready\n", __func__);
 1134                 return (error);
 1135         }
 1136 
 1137         sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
 1138             M_ZERO|M_NOWAIT);
 1139         if(!sc->facts) {
 1140                 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
 1141                  __func__, __LINE__);
 1142                 return (ENOMEM);
 1143         }
 1144         if ((error = mps_get_iocfacts(sc, sc->facts)) != 0)
 1145                 return (error);
 1146 
 1147         mps_print_iocfacts(sc, sc->facts);
 1148 
 1149         snprintf(sc->fw_version, sizeof(sc->fw_version), 
 1150             "%02d.%02d.%02d.%02d", 
 1151             sc->facts->FWVersion.Struct.Major,
 1152             sc->facts->FWVersion.Struct.Minor,
 1153             sc->facts->FWVersion.Struct.Unit,
 1154             sc->facts->FWVersion.Struct.Dev);
 1155 
 1156         mps_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version,
 1157             MPS_DRIVER_VERSION);
 1158         mps_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
 1159             "\2" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
 1160             "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
 1161             "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
 1162 
 1163         /*
 1164          * If the chip doesn't support event replay then a hard reset will be
 1165          * required to trigger a full discovery.  Do the reset here then
 1166          * retransition to Ready.  A hard reset might have already been done,
 1167          * but it doesn't hurt to do it again.
 1168          */
 1169         if ((sc->facts->IOCCapabilities &
 1170             MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
 1171                 mps_diag_reset(sc, NO_SLEEP);
 1172                 if ((error = mps_transition_ready(sc)) != 0)
 1173                         return (error);
 1174         }
 1175 
 1176         /*
 1177          * Set flag if IR Firmware is loaded.
 1178          */
 1179         if (sc->facts->IOCCapabilities &
 1180             MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
 1181                 sc->ir_firmware = 1;
 1182 
 1183         /*
 1184          * Check if controller supports FW diag buffers and set flag to enable
 1185          * each type.
 1186          */
 1187         if (sc->facts->IOCCapabilities &
 1188             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
 1189                 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].enabled =
 1190                     TRUE;
 1191         if (sc->facts->IOCCapabilities &
 1192             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
 1193                 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].enabled =
 1194                     TRUE;
 1195         if (sc->facts->IOCCapabilities &
 1196             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
 1197                 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].enabled =
 1198                     TRUE;
 1199 
 1200         /*
 1201          * Set flag if EEDP is supported and if TLR is supported.
 1202          */
 1203         if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
 1204                 sc->eedp_enabled = TRUE;
 1205         if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
 1206                 sc->control_TLR = TRUE;
 1207 
 1208         /*
 1209          * Size the queues. Since the reply queues always need one free entry,
 1210          * we'll just deduct one reply message here.
 1211          */
 1212         sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit);
 1213         sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES,
 1214             sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
 1215         TAILQ_INIT(&sc->req_list);
 1216         TAILQ_INIT(&sc->high_priority_req_list);
 1217         TAILQ_INIT(&sc->chain_list);
 1218         TAILQ_INIT(&sc->tm_list);
 1219 
 1220         if (((error = mps_alloc_queues(sc)) != 0) ||
 1221             ((error = mps_alloc_replies(sc)) != 0) ||
 1222             ((error = mps_alloc_requests(sc)) != 0)) {
 1223                 mps_printf(sc, "%s failed to alloc\n", __func__);
 1224                 mps_free(sc);
 1225                 return (error);
 1226         }
 1227 
 1228         if (((error = mps_init_queues(sc)) != 0) ||
 1229             ((error = mps_transition_operational(sc)) != 0)) {
 1230                 mps_printf(sc, "%s failed to transition operational\n", __func__);
 1231                 mps_free(sc);
 1232                 return (error);
 1233         }
 1234 
 1235         /*
 1236          * Finish the queue initialization.
 1237          * These are set here instead of in mps_init_queues() because the
 1238          * IOC resets these values during the state transition in
 1239          * mps_transition_operational().  The free index is set to 1
 1240          * because the corresponding index in the IOC is set to 0, and the
 1241          * IOC treats the queues as full if both are set to the same value.
 1242          * This is why the queue can't hold all of the possible
 1243          * replies.
 1244          */
 1245         sc->replypostindex = 0;
 1246         mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
 1247         mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
 1248 
 1249         sc->pfacts = malloc(sizeof(MPI2_PORT_FACTS_REPLY) *
 1250             sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK);
 1251         if(!sc->pfacts) {
 1252                 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
 1253                  __func__, __LINE__);
 1254                 return (ENOMEM);
 1255         }
 1256         for (i = 0; i < sc->facts->NumberOfPorts; i++) {
 1257                 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) {
 1258                         mps_printf(sc, "%s failed to get portfacts for port %d\n",
 1259                             __func__, i);
 1260                         mps_free(sc);
 1261                         return (error);
 1262                 }
 1263                 mps_print_portfacts(sc, &sc->pfacts[i]);
 1264         }
 1265 
 1266         /* Attach the subsystems so they can prepare their event masks. */
 1267         /* XXX Should be dynamic so that IM/IR and user modules can attach */
 1268         if (((error = mps_attach_log(sc)) != 0) ||
 1269             ((error = mps_attach_sas(sc)) != 0) ||
 1270             ((error = mps_attach_user(sc)) != 0)) {
 1271                 mps_printf(sc, "%s failed to attach all subsystems: error %d\n",
 1272                     __func__, error);
 1273                 mps_free(sc);
 1274                 return (error);
 1275         }
 1276 
 1277         if ((error = mps_pci_setup_interrupts(sc)) != 0) {
 1278                 mps_printf(sc, "%s failed to setup interrupts\n", __func__);
 1279                 mps_free(sc);
 1280                 return (error);
 1281         }
 1282 
 1283         /*
 1284          * The only static config page currently read is IOC Page 8.  Others
 1285          * can be added in the future.
 1286          */
 1287         mps_base_static_config_pages(sc);
 1288 
 1289         /* Start the periodic watchdog check on the IOC Doorbell */
 1290         mps_periodic(sc);
 1291 
 1292         /*
 1293          * The portenable will kick off discovery events that will drive the
 1294          * rest of the initialization process.  The CAM/SAS module will
 1295          * hold up the boot sequence until discovery is complete.
 1296          */
 1297         sc->mps_ich.ich_func = mps_startup;
 1298         sc->mps_ich.ich_arg = sc;
 1299         if (config_intrhook_establish(&sc->mps_ich) != 0) {
 1300                 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n");
 1301                 error = EINVAL;
 1302         }
 1303 
 1304         /*
 1305          * Allow IR to shutdown gracefully when shutdown occurs.
 1306          */
 1307         sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
 1308             mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
 1309 
 1310         if (sc->shutdown_eh == NULL)
 1311                 mps_dprint(sc, MPS_FAULT, "shutdown event registration "
 1312                     "failed\n");
 1313 
 1314         mps_setup_sysctl(sc);
 1315 
 1316         sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
 1317 
 1318         return (error);
 1319 }
 1320 
 1321 /* Run through any late-start handlers. */
 1322 static void
 1323 mps_startup(void *arg)
 1324 {
 1325         struct mps_softc *sc;
 1326 
 1327         sc = (struct mps_softc *)arg;
 1328 
 1329         mps_lock(sc);
 1330         mps_unmask_intr(sc);
 1331         /* initialize device mapping tables */
 1332         mps_mapping_initialize(sc);
 1333         mpssas_startup(sc);
 1334         mps_unlock(sc);
 1335 }
 1336 
 1337 /* Periodic watchdog.  Called with the driver lock already held. */
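      /*
       * The callout below rearms itself every MPS_PERIODIC_DELAY seconds (the
       * delay is converted to ticks by multiplying by hz), so an IOC fault in
       * the doorbell is noticed at that granularity rather than immediately.
       */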
 1338 static void
 1339 mps_periodic(void *arg)
 1340 {
 1341         struct mps_softc *sc;
 1342         uint32_t db;
 1343 
 1344         sc = (struct mps_softc *)arg;
 1345         if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
 1346                 return;
 1347 
 1348         db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
 1349         if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 1350                 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db);
 1351 
 1352                 mps_reinit(sc);
 1353         }
 1354 
 1355         callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc);
 1356 }
 1357 
 1358 static void
 1359 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
 1360     MPI2_EVENT_NOTIFICATION_REPLY *event)
 1361 {
 1362         MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
 1363 
 1364         mps_print_event(sc, event);
 1365 
 1366         switch (event->Event) {
 1367         case MPI2_EVENT_LOG_DATA:
 1368                 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n");
 1369                 hexdump(event->EventData, event->EventDataLength, NULL, 0);
 1370                 break;
 1371         case MPI2_EVENT_LOG_ENTRY_ADDED:
 1372                 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
 1373                 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event "
 1374                     "0x%x Sequence %d:\n", entry->LogEntryQualifier,
 1375                      entry->LogSequence);
 1376                 break;
 1377         default:
 1378                 break;
 1379         }
 1380         return;
 1381 }
 1382 
 1383 static int
 1384 mps_attach_log(struct mps_softc *sc)
 1385 {
 1386         u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
 1387 
 1388         bzero(events, sizeof(events));
 1389         setbit(events, MPI2_EVENT_LOG_DATA);
 1390         setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
 1391 
 1392         mps_register_events(sc, events, mps_log_evt_handler, NULL,
 1393             &sc->mps_log_eh);
 1394 
 1395         return (0);
 1396 }
 1397 
 1398 static int
 1399 mps_detach_log(struct mps_softc *sc)
 1400 {
 1401 
 1402         if (sc->mps_log_eh != NULL)
 1403                 mps_deregister_events(sc, sc->mps_log_eh);
 1404         return (0);
 1405 }
 1406 
 1407 /*
 1408  * Free all of the driver resources and detach submodules.  Should be called
 1409  * without the lock held.
 1410  */
 1411 int
 1412 mps_free(struct mps_softc *sc)
 1413 {
 1414         struct mps_command *cm;
 1415         int i, error;
 1416 
 1417         /* Turn off the watchdog */
 1418         mps_lock(sc);
 1419         sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
 1420         mps_unlock(sc);
 1421         /* Lock must not be held for this */
 1422         callout_drain(&sc->periodic);
 1423 
 1424         if (((error = mps_detach_log(sc)) != 0) ||
 1425             ((error = mps_detach_sas(sc)) != 0))
 1426                 return (error);
 1427 
 1428         mps_detach_user(sc);
 1429 
 1430         /* Put the IOC back in the READY state. */
 1431         mps_lock(sc);
 1432         if ((error = mps_transition_ready(sc)) != 0) {
 1433                 mps_unlock(sc);
 1434                 return (error);
 1435         }
 1436         mps_unlock(sc);
 1437 
 1438         if (sc->facts != NULL)
 1439                 free(sc->facts, M_MPT2);
 1440 
 1441         if (sc->pfacts != NULL)
 1442                 free(sc->pfacts, M_MPT2);
 1443 
 1444         if (sc->post_busaddr != 0)
 1445                 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
 1446         if (sc->post_queue != NULL)
 1447                 bus_dmamem_free(sc->queues_dmat, sc->post_queue,
 1448                     sc->queues_map);
 1449         if (sc->queues_dmat != NULL)
 1450                 bus_dma_tag_destroy(sc->queues_dmat);
 1451 
 1452         if (sc->chain_busaddr != 0)
 1453                 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
 1454         if (sc->chain_frames != NULL)
 1455                 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map);
 1456         if (sc->chain_dmat != NULL)
 1457                 bus_dma_tag_destroy(sc->chain_dmat);
 1458 
 1459         if (sc->sense_busaddr != 0)
 1460                 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
 1461         if (sc->sense_frames != NULL)
 1462                 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map);
 1463         if (sc->sense_dmat != NULL)
 1464                 bus_dma_tag_destroy(sc->sense_dmat);
 1465 
 1466         if (sc->reply_busaddr != 0)
 1467                 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
 1468         if (sc->reply_frames != NULL)
 1469                 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map);
 1470         if (sc->reply_dmat != NULL)
 1471                 bus_dma_tag_destroy(sc->reply_dmat);
 1472 
 1473         if (sc->req_busaddr != 0)
 1474                 bus_dmamap_unload(sc->req_dmat, sc->req_map);
 1475         if (sc->req_frames != NULL)
 1476                 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
 1477         if (sc->req_dmat != NULL)
 1478                 bus_dma_tag_destroy(sc->req_dmat);
 1479 
 1480         if (sc->chains != NULL)
 1481                 free(sc->chains, M_MPT2);
 1482         if (sc->commands != NULL) {
 1483                 for (i = 1; i < sc->num_reqs; i++) {
 1484                         cm = &sc->commands[i];
 1485                         bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
 1486                 }
 1487                 free(sc->commands, M_MPT2);
 1488         }
 1489         if (sc->buffer_dmat != NULL)
 1490                 bus_dma_tag_destroy(sc->buffer_dmat);
 1491 
 1492         if (sc->sysctl_tree != NULL)
 1493                 sysctl_ctx_free(&sc->sysctl_ctx);
 1494 
 1495         mps_mapping_free_memory(sc);
 1496 
 1497         /* Deregister the shutdown function */
 1498         if (sc->shutdown_eh != NULL)
 1499                 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
 1500 
 1501         mtx_destroy(&sc->mps_mtx);
 1502 
 1503         return (0);
 1504 }
 1505 
 1506 static __inline void
 1507 mps_complete_command(struct mps_command *cm)
 1508 {
 1509         if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
 1510                 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
 1511 
 1512         if (cm->cm_complete != NULL) {
 1513                 mps_dprint(cm->cm_sc, MPS_TRACE,
 1514                            "%s cm %p calling cm_complete %p data %p reply %p\n",
 1515                            __func__, cm, cm->cm_complete, cm->cm_complete_data,
 1516                            cm->cm_reply);
 1517                 cm->cm_complete(cm->cm_sc, cm);
 1518         }
 1519 
 1520         if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
 1521                 mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n",
 1522                            __func__, cm);
 1523                 wakeup(cm);
 1524         }
 1525 
 1526         if (cm->cm_sc->io_cmds_active != 0) {
 1527                 cm->cm_sc->io_cmds_active--;
 1528         } else {
 1529                 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: io_cmds_active is "
 1530                     "out of sync - resyncing to 0\n");
 1531         }
 1532 }
 1533 
 1534 
 1535 static void
 1536 mps_sas_log_info(struct mps_softc *sc , u32 log_info)
 1537 {
 1538         union loginfo_type {
 1539                 u32     loginfo;
 1540                 struct {
 1541                         u32     subcode:16;
 1542                         u32     code:8;
 1543                         u32     originator:4;
 1544                         u32     bus_type:4;
 1545                 } dw;
 1546         };
 1547         union loginfo_type sas_loginfo;
 1548         char *originator_str = "unknown";
 1549 
 1550         sas_loginfo.loginfo = log_info;
 1551         if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
 1552                 return;
 1553 
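              /*
               * Decoding sketch based on the union above: for example, a
               * loginfo of 0x31170000 breaks down as bus_type 0x3 (SAS),
               * originator 0x1 (PL), code 0x17, subcode 0x0000.
               */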
 1554         /* Ignore the common nexus-loss loginfo. */
 1555         if (log_info == 0x31170000)
 1556                 return;
 1557 
 1558         /* eat the loginfos associated with task aborts */
 1559         if ((log_info == 0x30050000 || log_info ==
 1560             0x31140000 || log_info == 0x31130000))
 1561                 return;
 1562 
 1563         switch (sas_loginfo.dw.originator) {
 1564         case 0:
 1565                 originator_str = "IOP";
 1566                 break;
 1567         case 1:
 1568                 originator_str = "PL";
 1569                 break;
 1570         case 2:
 1571                 originator_str = "IR";
 1572                 break;
 1573         }
 1574 
 1575         mps_dprint(sc, MPS_INFO, "log_info(0x%08x): originator(%s), "
 1576             "code(0x%02x), sub_code(0x%04x)\n", log_info,
 1577             originator_str, sas_loginfo.dw.code,
 1578             sas_loginfo.dw.subcode);
 1579 }
 1580 
 1581 static void
 1582 mps_display_reply_info(struct mps_softc *sc, uint8_t *reply)
 1583 {
 1584         MPI2DefaultReply_t *mpi_reply;
 1585         u16 sc_status;
 1586 
 1587         mpi_reply = (MPI2DefaultReply_t*)reply;
 1588         sc_status = le16toh(mpi_reply->IOCStatus);
 1589         if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 1590                 mps_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
 1591 }
 1592 void
 1593 mps_intr(void *data)
 1594 {
 1595         struct mps_softc *sc;
 1596         uint32_t status;
 1597 
 1598         sc = (struct mps_softc *)data;
 1599         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1600 
 1601         /*
 1602          * Check interrupt status register to flush the bus.  This is
 1603          * needed for both INTx interrupts and driver-driven polling
 1604          */
 1605         status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
 1606         if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
 1607                 return;
 1608 
 1609         mps_lock(sc);
 1610         mps_intr_locked(data);
 1611         mps_unlock(sc);
 1612         return;
 1613 }
 1614 
 1615 /*
 1616  * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
 1617  * chip.  Hopefully this theory is correct.
 1618  */
 1619 void
 1620 mps_intr_msi(void *data)
 1621 {
 1622         struct mps_softc *sc;
 1623 
 1624         sc = (struct mps_softc *)data;
 1625         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1626         mps_lock(sc);
 1627         mps_intr_locked(data);
 1628         mps_unlock(sc);
 1629         return;
 1630 }
 1631 
 1632 /*
 1633  * The locking is overly broad and simplistic, but easy to deal with for now.
 1634  */
 1635 void
 1636 mps_intr_locked(void *data)
 1637 {
 1638         MPI2_REPLY_DESCRIPTORS_UNION *desc;
 1639         struct mps_softc *sc;
 1640         struct mps_command *cm = NULL;
 1641         uint8_t flags;
 1642         u_int pq;
 1643         MPI2_DIAG_RELEASE_REPLY *rel_rep;
 1644         mps_fw_diagnostic_buffer_t *pBuffer;
 1645 
 1646         sc = (struct mps_softc *)data;
 1647 
 1648         pq = sc->replypostindex;
 1649         mps_dprint(sc, MPS_TRACE,
 1650             "%s sc %p starting with replypostindex %u\n", 
 1651             __func__, sc, sc->replypostindex);
 1652 
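              /*
               * The post queue is seeded with 0xFF bytes and each consumed
               * descriptor is rewritten to all-ones at the bottom of the loop,
               * so a descriptor whose type reads as UNUSED or whose high word
               * is still 0xffffffff has not been written by the IOC yet and
               * terminates the scan.
               */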
 1653         for ( ;; ) {
 1654                 cm = NULL;
 1655                 desc = &sc->post_queue[sc->replypostindex];
 1656                 flags = desc->Default.ReplyFlags &
 1657                     MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 1658                 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 1659                  || (le32toh(desc->Words.High) == 0xffffffff))
 1660                         break;
 1661 
 1662                 /* increment the replypostindex now, so that event handlers
 1663                  * and cm completion handlers which decide to do a diag
 1664                  * reset can zero it without it getting incremented again
 1665                  * afterwards, and we break out of this loop on the next
 1666                  * iteration since the reply post queue has been cleared to
 1667                  * 0xFF and all descriptors look unused (which they are).
 1668                  */
 1669                 if (++sc->replypostindex >= sc->pqdepth)
 1670                         sc->replypostindex = 0;
 1671 
 1672                 switch (flags) {
 1673                 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
 1674                         cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
 1675                         cm->cm_reply = NULL;
 1676                         break;
 1677                 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
 1678                 {
 1679                         uint32_t baddr;
 1680                         uint8_t *reply;
 1681 
 1682                         /*
 1683                          * Re-compose the reply address from the address
 1684                          * sent back from the chip.  The ReplyFrameAddress
 1685                          * is the lower 32 bits of the physical address of
 1686                          * particular reply frame.  Convert that address to
 1687                          * host format, and then use that to provide the
 1688                          * offset against the virtual address base
 1689                          * (sc->reply_frames).
 1690                          */
 1691                         baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
 1692                         reply = sc->reply_frames +
 1693                                 (baddr - ((uint32_t)sc->reply_busaddr));
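                              /*
                               * For example (hypothetical values): if the low
                               * 32 bits of sc->reply_busaddr are 0x10000000
                               * and the chip hands back 0x10000080, the reply
                               * sits 0x80 bytes into sc->reply_frames.
                               */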
 1694                         /*
 1695                          * Make sure the reply we got back is in a valid
 1696                          * range.  If not, go ahead and panic here, since
 1697                          * we'll probably panic as soon as we dereference the
 1698                          * reply pointer anyway.
 1699                          */
 1700                         if ((reply < sc->reply_frames)
 1701                          || (reply > (sc->reply_frames +
 1702                              (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
 1703                                 printf("%s: WARNING: reply %p out of range!\n",
 1704                                        __func__, reply);
 1705                                 printf("%s: reply_frames %p, fqdepth %d, "
 1706                                        "frame size %d\n", __func__,
 1707                                        sc->reply_frames, sc->fqdepth,
 1708                                        sc->facts->ReplyFrameSize * 4);
 1709                                 printf("%s: baddr %#x,\n", __func__, baddr);
 1710                                 /* LSI-TODO. See Linux code. Need graceful exit. */
 1711                                 panic("Reply address out of range");
 1712                         }
 1713                         if (le16toh(desc->AddressReply.SMID) == 0) {
 1714                                 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
 1715                                     MPI2_FUNCTION_DIAG_BUFFER_POST) {
 1716                                         /*
 1717                                          * If SMID is 0 for Diag Buffer Post,
 1718                                          * this implies that the reply is due to
 1719                                          * a release function with a status that
 1720                                          * the buffer has been released.  Set
 1721                                          * the buffer flags accordingly.
 1722                                          */
 1723                                         rel_rep =
 1724                                             (MPI2_DIAG_RELEASE_REPLY *)reply;
 1725                                         if (le16toh(rel_rep->IOCStatus) ==
 1726                                             MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
 1727                                             {
 1728                                                 pBuffer =
 1729                                                     &sc->fw_diag_buffer_list[
 1730                                                     rel_rep->BufferType];
 1731                                                 pBuffer->valid_data = TRUE;
 1732                                                 pBuffer->owned_by_firmware =
 1733                                                     FALSE;
 1734                                                 pBuffer->immediate = FALSE;
 1735                                         }
 1736                                 } else
 1737                                         mps_dispatch_event(sc, baddr,
 1738                                             (MPI2_EVENT_NOTIFICATION_REPLY *)
 1739                                             reply);
 1740                         } else {
 1741                                 cm = &sc->commands[le16toh(desc->AddressReply.SMID)];
 1742                                 cm->cm_reply = reply;
 1743                                 cm->cm_reply_data =
 1744                                     le32toh(desc->AddressReply.ReplyFrameAddress);
 1745                         }
 1746                         break;
 1747                 }
 1748                 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
 1749                 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
 1750                 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
 1751                 default:
 1752                         /* Unhandled */
 1753                         device_printf(sc->mps_dev, "Unhandled reply 0x%x\n",
 1754                             desc->Default.ReplyFlags);
 1755                         cm = NULL;
 1756                         break;
 1757                 }
 1758                 
 1759 
 1760                 if (cm != NULL) {
 1761                         /* Print the error reply frame, if any. */
 1762                         if (cm->cm_reply)
 1763                                 mps_display_reply_info(sc,cm->cm_reply);
 1764                         mps_complete_command(cm);
 1765                 }
 1766 
 1767                 desc->Words.Low = 0xffffffff;
 1768                 desc->Words.High = 0xffffffff;
 1769         }
 1770 
 1771         if (pq != sc->replypostindex) {
 1772                 mps_dprint(sc, MPS_TRACE,
 1773                     "%s sc %p writing postindex %d\n",
 1774                     __func__, sc, sc->replypostindex);
 1775                 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex);
 1776         }
 1777 
 1778         return;
 1779 }
 1780 
 1781 static void
 1782 mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
 1783     MPI2_EVENT_NOTIFICATION_REPLY *reply)
 1784 {
 1785         struct mps_event_handle *eh;
 1786         int event, handled = 0;
 1787 
 1788         event = le16toh(reply->Event);
 1789         TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
 1790                 if (isset(eh->mask, event)) {
 1791                         eh->callback(sc, data, reply);
 1792                         handled++;
 1793                 }
 1794         }
 1795 
 1796         if (handled == 0)
 1797                 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event);
 1798 
 1799         /*
 1800          * This is the only place that the event/reply should be freed.
 1801          * Anything wanting to hold onto the event data should have
 1802          * already copied it into their own storage.
 1803          */
 1804         mps_free_reply(sc, data);
 1805 }
 1806 
 1807 static void
 1808 mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm)
 1809 {
 1810         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1811 
 1812         if (cm->cm_reply)
 1813                 mps_print_event(sc,
 1814                         (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
 1815 
 1816         mps_free_command(sc, cm);
 1817 
 1818         /* next, send a port enable */
 1819         mpssas_startup(sc);
 1820 }
 1821 
 1822 /*
 1823  * For both register_events and update_events, the caller supplies a bitmap
 1824  * of events that it _wants_.  These functions then turn that into a bitmask
 1825  * suitable for the controller.
 1826  */
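      /*
       * Illustrative caller sketch, mirroring mps_attach_log() above; the
       * handler name and the chosen event are placeholders, not part of this
       * driver:
       *
       *      u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
       *      struct mps_event_handle *eh;
       *
       *      bzero(events, sizeof(events));
       *      setbit(events, MPI2_EVENT_SAS_DISCOVERY);
       *      error = mps_register_events(sc, events, my_evt_handler, NULL, &eh);
       */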
 1827 int
 1828 mps_register_events(struct mps_softc *sc, u32 *mask,
 1829     mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle)
 1830 {
 1831         struct mps_event_handle *eh;
 1832         int error = 0;
 1833 
 1834         eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO);
 1835         if(!eh) {
 1836                 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
 1837                  __func__, __LINE__);
 1838                 return (ENOMEM);
 1839         }
 1840         eh->callback = cb;
 1841         eh->data = data;
 1842         TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
 1843         if (mask != NULL)
 1844                 error = mps_update_events(sc, eh, mask);
 1845         *handle = eh;
 1846 
 1847         return (error);
 1848 }
 1849 
 1850 int
 1851 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
 1852     u32 *mask)
 1853 {
 1854         MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
 1855         MPI2_EVENT_NOTIFICATION_REPLY *reply;
 1856         struct mps_command *cm;
 1857         int error, i;
 1858 
 1859         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1860 
 1861         if ((mask != NULL) && (handle != NULL))
 1862                 bcopy(mask, &handle->mask[0], sizeof(u32) * 
 1863                                 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
 1864     
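              /*
               * In the MPI2 event notification request, a bit that is set in
               * EventMasks turns the corresponding event off, so start from
               * all ones and clear the bits for the events this handler wants.
               */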
 1865         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1866                 sc->event_mask[i] = -1;
 1867 
 1868         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1869                 sc->event_mask[i] &= ~handle->mask[i];
 1870 
 1871 
 1872         if ((cm = mps_alloc_command(sc)) == NULL)
 1873                 return (EBUSY);
 1874         evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
 1875         evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
 1876         evtreq->MsgFlags = 0;
 1877         evtreq->SASBroadcastPrimitiveMasks = 0;
 1878 #ifdef MPS_DEBUG_ALL_EVENTS
 1879         {
 1880                 u_char fullmask[16];
 1881                 memset(fullmask, 0x00, 16);
 1882                 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) * 
 1883                                 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
 1884         }
 1885 #else
 1886         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1887                 evtreq->EventMasks[i] =
 1888                     htole32(sc->event_mask[i]);
 1889 #endif
 1890         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 1891         cm->cm_data = NULL;
 1892 
 1893         error = mps_request_polled(sc, cm);
 1894         reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
 1895         if ((reply == NULL) ||
 1896             (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
 1897                 error = ENXIO;
 1898         mps_print_event(sc, reply);
 1899         mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error);
 1900 
 1901         mps_free_command(sc, cm);
 1902         return (error);
 1903 }
 1904 
 1905 static int
 1906 mps_reregister_events(struct mps_softc *sc)
 1907 {
 1908         MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
 1909         struct mps_command *cm;
 1910         struct mps_event_handle *eh;
 1911         int error, i;
 1912 
 1913         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1914 
 1915         /* first, reregister events */
 1916 
 1917         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1918                 sc->event_mask[i] = -1;
 1919 
 1920         TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
 1921                 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1922                         sc->event_mask[i] &= ~eh->mask[i];
 1923         }
 1924 
 1925         if ((cm = mps_alloc_command(sc)) == NULL)
 1926                 return (EBUSY);
 1927         evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
 1928         evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
 1929         evtreq->MsgFlags = 0;
 1930         evtreq->SASBroadcastPrimitiveMasks = 0;
 1931 #ifdef MPS_DEBUG_ALL_EVENTS
 1932         {
 1933                 u_char fullmask[16];
 1934                 memset(fullmask, 0x00, 16);
 1935                 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
 1936                         MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
 1937         }
 1938 #else
 1939         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 1940                 evtreq->EventMasks[i] =
 1941                     htole32(sc->event_mask[i]);
 1942 #endif
 1943         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 1944         cm->cm_data = NULL;
 1945         cm->cm_complete = mps_reregister_events_complete;
 1946 
 1947         error = mps_map_command(sc, cm);
 1948 
 1949         mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error);
 1950         return (error);
 1951 }
 1952 
 1953 void
 1954 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
 1955 {
 1956 
 1957         TAILQ_REMOVE(&sc->event_list, handle, eh_list);
 1958         free(handle, M_MPT2);
 1959 }
 1960 
 1961 /*
 1962  * Add a chain element as the next SGE for the specified command.
 1963  * Reset cm_sge and cm_sgesize to indicate all the available space.
 1964  */
 1965 static int
 1966 mps_add_chain(struct mps_command *cm)
 1967 {
 1968         MPI2_SGE_CHAIN32 *sgc;
 1969         struct mps_chain *chain;
 1970         int space;
 1971 
 1972         if (cm->cm_sglsize < MPS_SGC_SIZE)
 1973                 panic("MPS: Need SGE Error Code\n");
 1974 
 1975         chain = mps_alloc_chain(cm->cm_sc);
 1976         if (chain == NULL)
 1977                 return (ENOBUFS);
 1978 
 1979         space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
 1980 
 1981         /*
 1982          * Note: a double-linked list is used to make it easier to
 1983          * walk for debugging.
 1984          */
 1985         TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
 1986 
 1987         sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain;
 1988         sgc->Length = htole16(space);
 1989         sgc->NextChainOffset = 0;
 1990         /* TODO: Setting sgc->Flags looks like a bug at first glance; one might expect
 1991          *      sgc->Flags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
 1992          *                  MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT
 1993          *      It is fine as-is, though, because this is not a simple element:
 1994          *      MPI2_SGE_CHAIN32 has separate Length and Flags fields.
 1995          */
 1996         sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT;
 1997         sgc->Address = htole32(chain->chain_busaddr);
 1998 
 1999         cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
 2000         cm->cm_sglsize = space;
 2001         return (0);
 2002 }
 2003 
 2004 /*
 2005  * Add one scatter-gather element (chain, simple, transaction context)
 2006  * to the scatter-gather list for a command.  Maintain cm_sglsize and
 2007  * cm_sge as the remaining size and pointer to the next SGE to fill
 2008  * in, respectively.
 2009  */
 2010 int
 2011 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
 2012 {
 2013         MPI2_SGE_TRANSACTION_UNION *tc = sgep;
 2014         MPI2_SGE_SIMPLE64 *sge = sgep;
 2015         int error, type;
 2016         uint32_t saved_buf_len, saved_address_low, saved_address_high;
 2017         u32 sge_flags;
 2018 
 2019         type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
 2020 
 2021 #ifdef INVARIANTS
 2022         switch (type) {
 2023         case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: {
 2024                 if (len != tc->DetailsLength + 4)
 2025                         panic("TC %p length %u or %zu?", tc,
 2026                             tc->DetailsLength + 4, len);
 2027                 }
 2028                 break;
 2029         case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
 2030                 /* Driver only uses 32-bit chain elements */
 2031                 if (len != MPS_SGC_SIZE)
 2032                         panic("CHAIN %p length %u or %zu?", sgep,
 2033                             MPS_SGC_SIZE, len);
 2034                 break;
 2035         case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
 2036                 /* Driver only uses 64-bit SGE simple elements */
 2037                 sge = sgep;
 2038                 if (len != MPS_SGE64_SIZE)
 2039                         panic("SGE simple %p length %u or %zu?", sge,
 2040                             MPS_SGE64_SIZE, len);
 2041                 if (((sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT) &
 2042                     MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0)
 2043                         panic("SGE simple %p flags %02x not marked 64-bit?",
 2044                             sge, sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
 2045 
 2046                 break;
 2047         default:
 2048                 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
 2049         }
 2050 #endif
 2051 
 2052         /*
 2053          * case 1: 1 more segment, enough room for it
 2054          * case 2: 2 more segments, enough room for both
 2055          * case 3: >=2 more segments, only enough room for 1 and a chain
 2056          * case 4: >=1 more segment, enough room for only a chain
 2057          * case 5: >=1 more segment, no room for anything (error)
 2058          */
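              /*
               * Rough worked example (the SGE sizes are assumptions, roughly
               * 12 bytes for a 64-bit simple element and 8 for a 32-bit chain
               * element): with cm_sglsize == 24 and three segments left, there
               * is room for one simple SGE plus a chain but not for two simple
               * SGEs, so case 3 below applies and a chain frame is appended
               * after this element.
               */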
 2059 
 2060         /*
 2061          * There should be room for at least a chain element, or this
 2062          * code is buggy.  Case (5).
 2063          */
 2064         if (cm->cm_sglsize < MPS_SGC_SIZE)
 2065                 panic("MPS: Need SGE Error Code\n");
 2066 
 2067         if (segsleft >= 2 &&
 2068             cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
 2069                 /*
 2070                  * There are 2 or more segments left to add, and only
 2071                  * enough room for 1 and a chain.  Case (3).
 2072                  *
 2073                  * Mark as last element in this chain if necessary.
 2074                  */
 2075                 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
 2076                         sge->FlagsLength |=
 2077                                 (MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT);
 2078                 }
 2079 
 2080                 /*
 2081                  * Add the item then a chain.  Do the chain now,
 2082                  * rather than on the next iteration, to simplify
 2083                  * understanding the code.
 2084                  */
 2085                 cm->cm_sglsize -= len;
 2086                 /* Endian Safe code */
 2087                 sge_flags = sge->FlagsLength;
 2088                 sge->FlagsLength = htole32(sge_flags);
 2089                 sge->Address.High = htole32(sge->Address.High);
 2090                 sge->Address.Low = htole32(sge->Address.Low);
 2091                 bcopy(sgep, cm->cm_sge, len);
 2092                 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
 2093                 return (mps_add_chain(cm));
 2094         }
 2095 
 2096         if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
 2097                 /*
 2098                  * 1 or more segment, enough room for only a chain.
 2099                  * Hope the previous element wasn't a Simple entry
 2100                  * that needed to be marked with
 2101                  * MPI2_SGE_FLAGS_LAST_ELEMENT.  Case (4).
 2102                  */
 2103                 if ((error = mps_add_chain(cm)) != 0)
 2104                         return (error);
 2105         }
 2106 
 2107 #ifdef INVARIANTS
 2108         /* Case 1: 1 more segment, enough room for it. */
 2109         if (segsleft == 1 && cm->cm_sglsize < len)
 2110                 panic("1 seg left and no room? %u versus %zu",
 2111                     cm->cm_sglsize, len);
 2112 
 2113         /* Case 2: 2 more segments, enough room for both */
 2114         if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
 2115                 panic("2 segs left and no room? %u versus %zu",
 2116                     cm->cm_sglsize, len);
 2117 #endif
 2118 
 2119         if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
 2120                 /*
 2121                  * If this is a bi-directional request, need to account for that
 2122                  * here.  Save the pre-filled sge values.  These will be used
 2123                  * either for the 2nd SGL or for a single direction SGL.  If
 2124                  * cm_out_len is non-zero, this is a bi-directional request, so
 2125                  * fill in the OUT SGL first, then the IN SGL, otherwise just
 2126                  * fill in the IN SGL.  Note that at this time, when filling in
 2127                  * 2 SGL's for a bi-directional request, they both use the same
 2128                  * DMA buffer (same cm command).
 2129                  */
 2130                 saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
 2131                 saved_address_low = sge->Address.Low;
 2132                 saved_address_high = sge->Address.High;
 2133                 if (cm->cm_out_len) {
 2134                         sge->FlagsLength = cm->cm_out_len |
 2135                             ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 2136                             MPI2_SGE_FLAGS_END_OF_BUFFER |
 2137                             MPI2_SGE_FLAGS_HOST_TO_IOC |
 2138                             MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
 2139                             MPI2_SGE_FLAGS_SHIFT);
 2140                         cm->cm_sglsize -= len;
 2141                         /* Endian Safe code */
 2142                         sge_flags = sge->FlagsLength;
 2143                         sge->FlagsLength = htole32(sge_flags);
 2144                         sge->Address.High = htole32(sge->Address.High);
 2145                         sge->Address.Low = htole32(sge->Address.Low);
 2146                         bcopy(sgep, cm->cm_sge, len);
 2147                         cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge
 2148                             + len);
 2149                 }
 2150                 sge->FlagsLength = saved_buf_len |
 2151                     ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 2152                     MPI2_SGE_FLAGS_END_OF_BUFFER |
 2153                     MPI2_SGE_FLAGS_LAST_ELEMENT |
 2154                     MPI2_SGE_FLAGS_END_OF_LIST |
 2155                     MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
 2156                     MPI2_SGE_FLAGS_SHIFT);
 2157                 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
 2158                         sge->FlagsLength |=
 2159                             ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
 2160                             MPI2_SGE_FLAGS_SHIFT);
 2161                 } else {
 2162                         sge->FlagsLength |=
 2163                             ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
 2164                             MPI2_SGE_FLAGS_SHIFT);
 2165                 }
 2166                 sge->Address.Low = saved_address_low;
 2167                 sge->Address.High = saved_address_high;
 2168         }
 2169 
 2170         cm->cm_sglsize -= len;
 2171         /* Endian Safe code */
 2172         sge_flags = sge->FlagsLength;
 2173         sge->FlagsLength = htole32(sge_flags);
 2174                 sge->Address.High = htole32(sge->Address.High);
 2175                 sge->Address.Low = htole32(sge->Address.Low);
 2176         bcopy(sgep, cm->cm_sge, len);
 2177         cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
 2178         return (0);
 2179 }
 2180 
 2181 /*
 2182  * Add one dma segment to the scatter-gather list for a command.
 2183  */
 2184 int
 2185 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
 2186     int segsleft)
 2187 {
 2188         MPI2_SGE_SIMPLE64 sge;
 2189 
 2190         /*
 2191          * This driver always uses 64-bit address elements for simplicity.
 2192          */
 2193         flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 2194             MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
 2195         /* Endian conversion is handled in mps_push_sge(). */
 2196         sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
 2197         mps_from_u64(pa, &sge.Address);
 2198 
 2199         return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
 2200 }
 2201 
 2202 static void
 2203 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 2204 {
 2205         struct mps_softc *sc;
 2206         struct mps_command *cm;
 2207         u_int i, dir, sflags;
 2208 
 2209         cm = (struct mps_command *)arg;
 2210         sc = cm->cm_sc;
 2211 
 2212         /*
 2213          * In this case, just print out a warning and let the chip tell the
 2214          * user they did the wrong thing.
 2215          */
 2216         if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
 2217                 mps_printf(sc, "%s: warning: busdma returned %d segments, "
 2218                            "more than the %d allowed\n", __func__, nsegs,
 2219                            cm->cm_max_segs);
 2220         }
 2221 
 2222         /*
 2223          * Set up DMA direction flags.  Bi-directional requests are also handled
 2224          * here.  In that case, both direction flags will be set.
 2225          */
 2226         sflags = 0;
 2227         if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
 2228                 /*
 2229                  * We have to add a special case for SMP passthrough, there
 2230                  * is no easy way to generically handle it.  The first
 2231                  * S/G element is used for the command (therefore the
 2232                  * direction bit needs to be set).  The second one is used
 2233                  * for the reply.  We'll leave it to the caller to make
 2234                  * sure we only have two buffers.
 2235                  */
 2236                 /*
 2237                  * Even though the busdma man page says it doesn't make
 2238                  * sense to have both direction flags, it does in this case.
 2239                  * We have one s/g element being accessed in each direction.
 2240                  */
 2241                 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
 2242 
 2243                 /*
 2244                  * Set the direction flag on the first buffer in the SMP
 2245                  * passthrough request.  We'll clear it for the second one.
 2246                  */
 2247                 sflags |= MPI2_SGE_FLAGS_DIRECTION |
 2248                           MPI2_SGE_FLAGS_END_OF_BUFFER;
 2249         } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
 2250                 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
 2251                 dir = BUS_DMASYNC_PREWRITE;
 2252         } else
 2253                 dir = BUS_DMASYNC_PREREAD;
 2254 
 2255         for (i = 0; i < nsegs; i++) {
 2256                 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
 2257                         sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
 2258                 }
 2259                 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
 2260                     sflags, nsegs - i);
 2261                 if (error != 0) {
 2262                         /* Resource shortage, roll back! */
 2263                         mps_dprint(sc, MPS_INFO, "out of chain frames\n");
 2264                         cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
 2265                         mps_complete_command(cm);
 2266                         return;
 2267                 }
 2268         }
 2269 
 2270         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 2271         mps_enqueue_request(sc, cm);
 2272 
 2273         return;
 2274 }
 2275 
 2276 static void
 2277 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
 2278              int error)
 2279 {
 2280         mps_data_cb(arg, segs, nsegs, error);
 2281 }
 2282 
 2283 /*
 2284  * This is the routine to enqueue commands asynchronously.
 2285  * Note that the only error path here is from bus_dmamap_load(), which can
 2286  * return EINPROGRESS if it is waiting for resources.  Other than this, it's
 2287  * assumed that if you have a command in-hand, then you have enough credits
 2288  * to use it.
 2289  */
 2290 int
 2291 mps_map_command(struct mps_softc *sc, struct mps_command *cm)
 2292 {
 2293         MPI2_SGE_SIMPLE32 *sge;
 2294         int error = 0;
 2295 
 2296         if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
 2297                 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
 2298                     &cm->cm_uio, mps_data_cb2, cm, 0);
 2299         } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
 2300                 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
 2301                     cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
 2302         } else {
 2303                 /* Add a zero-length element as needed */
 2304                 if (cm->cm_sge != NULL) {
 2305                         sge = (MPI2_SGE_SIMPLE32 *)cm->cm_sge;
 2306                         sge->FlagsLength = htole32((MPI2_SGE_FLAGS_LAST_ELEMENT |
 2307                             MPI2_SGE_FLAGS_END_OF_BUFFER |
 2308                             MPI2_SGE_FLAGS_END_OF_LIST |
 2309                             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
 2310                             MPI2_SGE_FLAGS_SHIFT);
 2311                         sge->Address = 0;
 2312                 }
 2313                 mps_enqueue_request(sc, cm);    
 2314         }
 2315 
 2316         return (error);
 2317 }
 2318 
 2319 /*
 2320  * This is the routine to enqueue commands synchronously.  An error of
 2321  * EINPROGRESS from mps_map_command() is ignored since the command will
 2322  * be executed and enqueued automatically.  Other errors come from msleep().
 2323  */
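      /*
       * Illustrative caller sketch; the request setup and the 60 second
       * timeout are placeholders:
       *
       *      if ((cm = mps_alloc_command(sc)) == NULL)
       *              return (EBUSY);
       *      ... fill in cm->cm_req, cm->cm_desc, and cm->cm_data ...
       *      error = mps_wait_command(sc, cm, 60);
       *      mps_free_command(sc, cm);
       */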
 2324 int
 2325 mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout)
 2326 {
 2327         int error, rc;
 2328 
 2329         mtx_assert(&sc->mps_mtx, MA_OWNED);
 2330         
 2331         if (sc->mps_flags & MPS_FLAGS_DIAGRESET)
 2332                 return (EBUSY);
 2333 
 2334         cm->cm_complete = NULL;
 2335         cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
 2336         error = mps_map_command(sc, cm);
 2337         if ((error != 0) && (error != EINPROGRESS))
 2338                 return (error);
 2339         error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout*hz);
 2340         if (error == EWOULDBLOCK) {
 2341                 mps_dprint(sc, MPS_FAULT, "Calling Reinit from %s\n", __func__);
 2342                 rc = mps_reinit(sc);
 2343                 mps_dprint(sc, MPS_FAULT, "Reinit %s\n", 
 2344                                 (rc == 0) ? "success" : "failed");
 2345                 error = ETIMEDOUT;
 2346         }
 2347         return (error);
 2348 }
 2349 
 2350 /*
 2351  * This is the routine to enqueue a command synchronously and poll for
 2352  * completion.  Its use should be rare.
 2353  */
 2354 int
 2355 mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
 2356 {
 2357         int error, timeout = 0, rc;
 2358 
 2359         error = 0;
 2360 
 2361         cm->cm_flags |= MPS_CM_FLAGS_POLLED;
 2362         cm->cm_complete = NULL;
 2363         mps_map_command(sc, cm);
 2364 
 2365         while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
 2366                 mps_intr_locked(sc);
 2367 
 2368                 DELAY(50 * 1000);
 2369                 if (timeout++ > 1000) {
 2370                         mps_dprint(sc, MPS_FAULT, "polling failed\n");
 2371                         error = ETIMEDOUT;
 2372                         break;
 2373                 }
 2374         }
 2375         
 2376         if (error) {
 2377                 mps_dprint(sc, MPS_FAULT, "Calling Reinit from %s\n", __func__);
 2378                 rc = mps_reinit(sc);
 2379                 mps_dprint(sc, MPS_FAULT, "Reinit %s\n", 
 2380                                 (rc == 0) ? "success" : "failed");
 2381         }
 2382 
 2383         return (error);
 2384 }
 2385 
 2386 /*
 2387  * The MPT driver had a verbose interface for config pages.  In this driver,
 2388  * it is reduced to much simpler terms, similar to the Linux driver.
 2389  */
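      /*
       * Simplified, hypothetical caller sketch for a synchronous read.  A real
       * caller typically issues a PAGE_HEADER action first to learn the page
       * length and version; that step is omitted here and ioc_pg8 is a
       * placeholder buffer:
       *
       *      struct mps_config_params params;
       *
       *      bzero(&params, sizeof(params));
       *      params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
       *      params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IOC;
       *      params.hdr.Struct.PageNumber = 8;
       *      params.buffer = &ioc_pg8;
       *      params.length = sizeof(ioc_pg8);
       *      params.callback = NULL;         (NULL: wait for completion)
       *      error = mps_read_config_page(sc, &params);
       */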
 2390 int
 2391 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
 2392 {
 2393         MPI2_CONFIG_REQUEST *req;
 2394         struct mps_command *cm;
 2395         int error;
 2396 
 2397         if (sc->mps_flags & MPS_FLAGS_BUSY) {
 2398                 return (EBUSY);
 2399         }
 2400 
 2401         cm = mps_alloc_command(sc);
 2402         if (cm == NULL) {
 2403                 return (EBUSY);
 2404         }
 2405 
 2406         req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
 2407         req->Function = MPI2_FUNCTION_CONFIG;
 2408         req->Action = params->action;
 2409         req->SGLFlags = 0;
 2410         req->ChainOffset = 0;
 2411         req->PageAddress = params->page_address;
 2412         if (params->hdr.Ext.ExtPageType != 0) {
 2413                 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
 2414 
 2415                 hdr = &params->hdr.Ext;
 2416                 req->ExtPageType = hdr->ExtPageType;
 2417                 req->ExtPageLength = hdr->ExtPageLength;
 2418                 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
 2419                 req->Header.PageLength = 0; /* Must be set to zero */
 2420                 req->Header.PageNumber = hdr->PageNumber;
 2421                 req->Header.PageVersion = hdr->PageVersion;
 2422         } else {
 2423                 MPI2_CONFIG_PAGE_HEADER *hdr;
 2424 
 2425                 hdr = &params->hdr.Struct;
 2426                 req->Header.PageType = hdr->PageType;
 2427                 req->Header.PageNumber = hdr->PageNumber;
 2428                 req->Header.PageLength = hdr->PageLength;
 2429                 req->Header.PageVersion = hdr->PageVersion;
 2430         }
 2431 
 2432         cm->cm_data = params->buffer;
 2433         cm->cm_length = params->length;
 2434         cm->cm_sge = &req->PageBufferSGE;
 2435         cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
 2436         cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
 2437         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 2438 
 2439         cm->cm_complete_data = params;
 2440         if (params->callback != NULL) {
 2441                 cm->cm_complete = mps_config_complete;
 2442                 return (mps_map_command(sc, cm));
 2443         } else {
 2444                 error = mps_wait_command(sc, cm, 0);
 2445                 if (error) {
 2446                         mps_dprint(sc, MPS_FAULT,
 2447                             "Error %d reading config page\n", error);
 2448                         mps_free_command(sc, cm);
 2449                         return (error);
 2450                 }
 2451                 mps_config_complete(sc, cm);
 2452         }
 2453 
 2454         return (0);
 2455 }
 2456 
 2457 int
 2458 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
 2459 {
 2460         return (EINVAL);
 2461 }
 2462 
 2463 static void
 2464 mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
 2465 {
 2466         MPI2_CONFIG_REPLY *reply;
 2467         struct mps_config_params *params;
 2468 
 2469         params = cm->cm_complete_data;
 2470 
 2471         if (cm->cm_data != NULL) {
 2472                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 2473                     BUS_DMASYNC_POSTREAD);
 2474                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 2475         }
 2476 
 2477         /*
 2478          * XXX KDM need to do more error recovery?  This results in the
 2479          * device in question not getting probed.
 2480          */
 2481         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 2482                 params->status = MPI2_IOCSTATUS_BUSY;
 2483                 goto done;
 2484         }
 2485 
 2486         reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
 2487         if (reply == NULL) {
 2488                 params->status = MPI2_IOCSTATUS_BUSY;
 2489                 goto done;
 2490         }
 2491         params->status = reply->IOCStatus;
 2492         if (params->hdr.Ext.ExtPageType != 0) {
 2493                 params->hdr.Ext.ExtPageType = reply->ExtPageType;
 2494                 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
 2495         } else {
 2496                 params->hdr.Struct.PageType = reply->Header.PageType;
 2497                 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
 2498                 params->hdr.Struct.PageLength = reply->Header.PageLength;
 2499                 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
 2500         }
 2501 
 2502 done:
 2503         mps_free_command(sc, cm);
 2504         if (params->callback != NULL)
 2505                 params->callback(sc, params);
 2506 
 2507         return;
 2508 }
