FreeBSD/Linux Kernel Cross Reference
sys/dev/mps/mps.c


    1 /*-
    2  * Copyright (c) 2009 Yahoo! Inc.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/9.0/sys/dev/mps/mps.c 219036 2011-02-25 17:30:25Z ken $");
   29 
   30 /* Communications core for LSI MPT2 */
   31 
   32 #include <sys/types.h>
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/selinfo.h>
   37 #include <sys/lock.h>
   38 #include <sys/mutex.h>
   39 #include <sys/module.h>
   40 #include <sys/bus.h>
   41 #include <sys/conf.h>
   42 #include <sys/bio.h>
   43 #include <sys/malloc.h>
   44 #include <sys/uio.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/endian.h>
   47 
   48 #include <machine/bus.h>
   49 #include <machine/resource.h>
   50 #include <sys/rman.h>
   51 
   52 #include <cam/scsi/scsi_all.h>
   53 
   54 #include <dev/mps/mpi/mpi2_type.h>
   55 #include <dev/mps/mpi/mpi2.h>
   56 #include <dev/mps/mpi/mpi2_ioc.h>
   57 #include <dev/mps/mpi/mpi2_cnfg.h>
   58 #include <dev/mps/mpsvar.h>
   59 #include <dev/mps/mps_table.h>
   60 
   61 static void mps_startup(void *arg);
   62 static void mps_startup_complete(struct mps_softc *sc, struct mps_command *cm);
   63 static int mps_send_iocinit(struct mps_softc *sc);
   64 static int mps_attach_log(struct mps_softc *sc);
   65 static __inline void mps_complete_command(struct mps_command *cm);
   66 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply);
   67 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
   68 static void mps_periodic(void *);
   69 
   70 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters");
   71 
   72 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
   73 
   74 /*
   75  * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
   76  * any state and back to its initialization state machine.
   77  */
   78 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
   79 
   80 static int
   81 mps_hard_reset(struct mps_softc *sc)
   82 {
   83         uint32_t reg;
   84         int i, error, tries = 0;
   85 
   86         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
   87 
   88         /* Clear any pending interrupts */
   89         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
   90 
   91         /* Push the magic sequence */
   92         error = ETIMEDOUT;
   93         while (tries++ < 20) {
   94                 for (i = 0; i < sizeof(mpt2_reset_magic); i++)
   95                         mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
   96                             mpt2_reset_magic[i]);
   97 
   98                 DELAY(100 * 1000);
   99 
  100                 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
  101                 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
  102                         error = 0;
  103                         break;
  104                 }
  105         }
  106         if (error)
  107                 return (error);
  108 
  109         /* Send the actual reset.  XXX need to refresh the reg? */
  110         mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
  111             reg | MPI2_DIAG_RESET_ADAPTER);
  112 
   113         /* Wait up to 3000 seconds (60000 * 50ms) for the reset to complete */
  114         error = ETIMEDOUT;
  115         for (i = 0; i < 60000; i++) {
  116                 DELAY(50000);
  117                 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  118                 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
  119                         error = 0;
  120                         break;
  121                 }
  122         }
  123         if (error)
  124                 return (error);
  125 
  126         mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
  127 
  128         return (0);
  129 }
  130 
  131 static int
  132 mps_soft_reset(struct mps_softc *sc)
  133 {
  134 
  135         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  136 
  137         mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
  138             MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
  139             MPI2_DOORBELL_FUNCTION_SHIFT);
  140         DELAY(50000);
  141 
  142         return (0);
  143 }
  144 
  145 static int
  146 mps_transition_ready(struct mps_softc *sc)
  147 {
  148         uint32_t reg, state;
  149         int error, tries = 0;
  150 
  151         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  152 
  153         error = 0;
  154         while (tries++ < 5) {
  155                 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  156                 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
  157 
  158                 /*
  159                  * Ensure the IOC is ready to talk.  If it's not, try
  160                  * resetting it.
  161                  */
  162                 if (reg & MPI2_DOORBELL_USED) {
  163                         mps_hard_reset(sc);
  164                         DELAY(50000);
  165                         continue;
  166                 }
  167 
  168                 /* Is the adapter owned by another peer? */
  169                 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
  170                     (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
  171                         device_printf(sc->mps_dev, "IOC is under the control "
  172                             "of another peer host, aborting initialization.\n");
  173                         return (ENXIO);
  174                 }
  175                 
  176                 state = reg & MPI2_IOC_STATE_MASK;
  177                 if (state == MPI2_IOC_STATE_READY) {
  178                         /* Ready to go! */
  179                         error = 0;
  180                         break;
  181                 } else if (state == MPI2_IOC_STATE_FAULT) {
  182                         mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n",
  183                             state & MPI2_DOORBELL_FAULT_CODE_MASK);
  184                         mps_hard_reset(sc);
  185                 } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
  186                         /* Need to take ownership */
  187                         mps_soft_reset(sc);
  188                 } else if (state == MPI2_IOC_STATE_RESET) {
  189                         /* Wait a bit, IOC might be in transition */
  190                         mps_dprint(sc, MPS_FAULT,
  191                             "IOC in unexpected reset state\n");
  192                 } else {
  193                         mps_dprint(sc, MPS_FAULT,
  194                             "IOC in unknown state 0x%x\n", state);
  195                         error = EINVAL;
  196                         break;
  197                 }
  198         
  199                 /* Wait 50ms for things to settle down. */
  200                 DELAY(50000);
  201         }
  202 
  203         if (error)
  204                 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n");
  205 
  206         return (error);
  207 }
  208 
  209 static int
  210 mps_transition_operational(struct mps_softc *sc)
  211 {
  212         uint32_t reg, state;
  213         int error;
  214 
  215         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  216 
  217         error = 0;
  218         reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
  219         mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
  220 
  221         state = reg & MPI2_IOC_STATE_MASK;
  222         if (state != MPI2_IOC_STATE_READY) {
  223                 if ((error = mps_transition_ready(sc)) != 0)
  224                         return (error);
  225         }
  226 
  227         error = mps_send_iocinit(sc);
  228         return (error);
  229 }
  230 
  231 /* Wait for the chip to ACK a word that we've put into its FIFO */
  232 static int
  233 mps_wait_db_ack(struct mps_softc *sc)
  234 {
  235         int retry;
  236 
  237         for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
  238                 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
  239                     MPI2_HIS_SYS2IOC_DB_STATUS) == 0)
  240                         return (0);
  241                 DELAY(2000);
  242         }
  243         return (ETIMEDOUT);
  244 }
  245 
  246 /* Wait for the chip to signal that the next word in its FIFO can be fetched */
  247 static int
  248 mps_wait_db_int(struct mps_softc *sc)
  249 {
  250         int retry;
  251 
  252         for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
  253                 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
  254                     MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
  255                         return (0);
  256                 DELAY(2000);
  257         }
  258         return (ETIMEDOUT);
  259 }
  260 
  261 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
  262 static int
  263 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
  264     int req_sz, int reply_sz, int timeout)
  265 {
  266         uint32_t *data32;
  267         uint16_t *data16;
  268         int i, count, ioc_sz, residual;
  269 
  270         /* Step 1 */
  271         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  272 
  273         /* Step 2 */
  274         if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
  275                 return (EBUSY);
  276 
  277         /* Step 3
  278          * Announce that a message is coming through the doorbell.  Messages
  279          * are pushed at 32bit words, so round up if needed.
  280          */
  281         count = (req_sz + 3) / 4;
  282         mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
  283             (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
  284             (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
  285 
  286         /* Step 4 */
  287         if (mps_wait_db_int(sc) ||
  288             (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
  289                 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n");
  290                 return (ENXIO);
  291         }
  292         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  293         if (mps_wait_db_ack(sc) != 0) {
  294                 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n");
  295                 return (ENXIO);
  296         }
  297 
  298         /* Step 5 */
  299         /* Clock out the message data synchronously in 32-bit dwords*/
  300         data32 = (uint32_t *)req;
  301         for (i = 0; i < count; i++) {
  302                 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, data32[i]);
  303                 if (mps_wait_db_ack(sc) != 0) {
  304                         mps_dprint(sc, MPS_FAULT,
  305                             "Timeout while writing doorbell\n");
  306                         return (ENXIO);
  307                 }
  308         }
  309 
  310         /* Step 6 */
  311         /* Clock in the reply in 16-bit words.  The total length of the
   312          * message is always in the 4th byte, so clock in the first 2 words
  313          * manually, then loop the rest.
  314          */
  315         data16 = (uint16_t *)reply;
  316         if (mps_wait_db_int(sc) != 0) {
  317                 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n");
  318                 return (ENXIO);
  319         }
  320         data16[0] =
  321             mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
  322         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  323         if (mps_wait_db_int(sc) != 0) {
  324                 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n");
  325                 return (ENXIO);
  326         }
  327         data16[1] =
  328             mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
  329         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  330 
  331         /* Number of 32bit words in the message */
  332         ioc_sz = reply->MsgLength;
  333 
  334         /*
  335          * Figure out how many 16bit words to clock in without overrunning.
  336          * The precision loss with dividing reply_sz can safely be
  337          * ignored because the messages can only be multiples of 32bits.
  338          */
  339         residual = 0;
  340         count = MIN((reply_sz / 4), ioc_sz) * 2;
  341         if (count < ioc_sz * 2) {
  342                 residual = ioc_sz * 2 - count;
  343                 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d "
  344                     "residual message words\n", residual);
  345         }
  346 
  347         for (i = 2; i < count; i++) {
  348                 if (mps_wait_db_int(sc) != 0) {
  349                         mps_dprint(sc, MPS_FAULT,
  350                             "Timeout reading doorbell %d\n", i);
  351                         return (ENXIO);
  352                 }
  353                 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) &
  354                     MPI2_DOORBELL_DATA_MASK;
  355                 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  356         }
  357 
  358         /*
  359          * Pull out residual words that won't fit into the provided buffer.
  360          * This keeps the chip from hanging due to a driver programming
  361          * error.
  362          */
  363         while (residual--) {
  364                 if (mps_wait_db_int(sc) != 0) {
  365                         mps_dprint(sc, MPS_FAULT,
  366                             "Timeout reading doorbell\n");
  367                         return (ENXIO);
  368                 }
  369                 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET);
  370                 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  371         }
  372 
  373         /* Step 7 */
  374         if (mps_wait_db_int(sc) != 0) {
  375                 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n");
  376                 return (ENXIO);
  377         }
  378         if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
  379                 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n");
  380         mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
  381 
  382         return (0);
  383 }
  384 
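A worked example of the size arithmetic in mps_request_sync() above, with sizes that are assumed purely for illustration:

/*
 * Hypothetical handshake sizes: req_sz == 12 bytes, reply_sz == 64 bytes,
 * and the IOC reports MsgLength == 15 (32-bit words) in the reply header.
 *
 *   Step 3:  count    = (12 + 3) / 4        = 3 dwords clocked out
 *   Step 6:  ioc_sz   = 15
 *            count    = MIN(64 / 4, 15) * 2 = 30 16-bit words clocked in
 *            residual = 0
 *
 * With an undersized buffer of reply_sz == 40 bytes instead:
 *            count    = MIN(40 / 4, 15) * 2 = 20 16-bit words kept
 *            residual = 15 * 2 - 20         = 10 words drained and discarded
 */
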
  385 void
  386 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
  387 {
  388 
  389         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  390 
  391         if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
  392                 mtx_assert(&sc->mps_mtx, MA_OWNED);
  393 
  394         if ((cm->cm_desc.Default.SMID < 1)
  395          || (cm->cm_desc.Default.SMID >= sc->num_reqs)) {
  396                 mps_printf(sc, "%s: invalid SMID %d, desc %#x %#x\n",
  397                            __func__, cm->cm_desc.Default.SMID,
  398                            cm->cm_desc.Words.High, cm->cm_desc.Words.Low);
  399         }
  400         mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
  401             cm->cm_desc.Words.Low);
  402         mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
  403             cm->cm_desc.Words.High);
  404 }
  405 
  406 int
  407 mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
  408 {
  409         int error, timeout = 0;
  410 
  411         error = 0;
  412 
  413         cm->cm_flags |= MPS_CM_FLAGS_POLLED;
  414         cm->cm_complete = NULL;
  415         mps_map_command(sc, cm);
  416 
  417         while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
  418                 mps_intr(sc);
  419                 DELAY(50 * 1000);
  420                 if (timeout++ > 1000) {
  421                         mps_dprint(sc, MPS_FAULT, "polling failed\n");
  422                         error = ETIMEDOUT;
  423                         break;
  424                 }
  425         }
  426 
  427         return (error);
  428 }
  429 
  430 /*
  431  * Just the FACTS, ma'am.
  432  */
  433 static int
  434 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
  435 {
  436         MPI2_DEFAULT_REPLY *reply;
  437         MPI2_IOC_FACTS_REQUEST request;
  438         int error, req_sz, reply_sz;
  439 
  440         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  441 
  442         req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
  443         reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
  444         reply = (MPI2_DEFAULT_REPLY *)facts;
  445 
  446         bzero(&request, req_sz);
  447         request.Function = MPI2_FUNCTION_IOC_FACTS;
  448         error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
  449 
  450         return (error);
  451 }
  452 
  453 static int
  454 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port)
  455 {
  456         MPI2_PORT_FACTS_REQUEST *request;
  457         MPI2_PORT_FACTS_REPLY *reply;
  458         struct mps_command *cm;
  459         int error;
  460 
  461         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  462 
  463         if ((cm = mps_alloc_command(sc)) == NULL)
  464                 return (EBUSY);
  465         request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req;
  466         request->Function = MPI2_FUNCTION_PORT_FACTS;
  467         request->PortNumber = port;
  468         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  469         cm->cm_data = NULL;
  470         error = mps_request_polled(sc, cm);
  471         reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply;
  472         if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
  473                 error = ENXIO;
  474         bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY));
  475         mps_free_command(sc, cm);
  476 
  477         return (error);
  478 }
  479 
  480 static int
  481 mps_send_iocinit(struct mps_softc *sc)
  482 {
  483         MPI2_IOC_INIT_REQUEST   init;
  484         MPI2_DEFAULT_REPLY      reply;
  485         int req_sz, reply_sz, error;
  486 
  487         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  488 
  489         req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
  490         reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
  491         bzero(&init, req_sz);
  492         bzero(&reply, reply_sz);
  493 
  494         /*
  495          * Fill in the init block.  Note that most addresses are
  496          * deliberately in the lower 32bits of memory.  This is a micro-
   497          * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
  498          */
  499         init.Function = MPI2_FUNCTION_IOC_INIT;
  500         init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
  501         init.MsgVersion = MPI2_VERSION;
  502         init.HeaderVersion = MPI2_HEADER_VERSION;
  503         init.SystemRequestFrameSize = sc->facts->IOCRequestFrameSize;
  504         init.ReplyDescriptorPostQueueDepth = sc->pqdepth;
  505         init.ReplyFreeQueueDepth = sc->fqdepth;
  506         init.SenseBufferAddressHigh = 0;
  507         init.SystemReplyAddressHigh = 0;
  508         init.SystemRequestFrameBaseAddress.High = 0;
  509         init.SystemRequestFrameBaseAddress.Low = (uint32_t)sc->req_busaddr;
  510         init.ReplyDescriptorPostQueueAddress.High = 0;
  511         init.ReplyDescriptorPostQueueAddress.Low = (uint32_t)sc->post_busaddr;
  512         init.ReplyFreeQueueAddress.High = 0;
  513         init.ReplyFreeQueueAddress.Low = (uint32_t)sc->free_busaddr;
  514         init.TimeStamp.High = 0;
  515         init.TimeStamp.Low = (uint32_t)time_uptime;
  516 
  517         error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
  518         if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
  519                 error = ENXIO;
  520 
  521         mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus);
  522         return (error);
  523 }
  524 
  525 static int
  526 mps_send_portenable(struct mps_softc *sc)
  527 {
  528         MPI2_PORT_ENABLE_REQUEST *request;
  529         struct mps_command *cm;
  530 
  531         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  532 
  533         if ((cm = mps_alloc_command(sc)) == NULL)
  534                 return (EBUSY);
  535         request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
  536         request->Function = MPI2_FUNCTION_PORT_ENABLE;
  537         request->MsgFlags = 0;
  538         request->VP_ID = 0;
  539         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  540         cm->cm_complete = mps_startup_complete;
  541 
  542         mps_enqueue_request(sc, cm);
  543         return (0);
  544 }
  545 
  546 static int
  547 mps_send_mur(struct mps_softc *sc)
  548 {
  549 
  550         /* Placeholder */
  551         return (0);
  552 }
  553 
  554 void
  555 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  556 {
  557         bus_addr_t *addr;
  558 
  559         addr = arg;
  560         *addr = segs[0].ds_addr;
  561 }
  562 
  563 static int
  564 mps_alloc_queues(struct mps_softc *sc)
  565 {
  566         bus_addr_t queues_busaddr;
  567         uint8_t *queues;
  568         int qsize, fqsize, pqsize;
  569 
  570         /*
  571          * The reply free queue contains 4 byte entries in multiples of 16 and
  572          * aligned on a 16 byte boundary. There must always be an unused entry.
  573          * This queue supplies fresh reply frames for the firmware to use.
  574          *
  575          * The reply descriptor post queue contains 8 byte entries in
  576          * multiples of 16 and aligned on a 16 byte boundary.  This queue
  577          * contains filled-in reply frames sent from the firmware to the host.
  578          *
  579          * These two queues are allocated together for simplicity.
  580          */
  581         sc->fqdepth = roundup2((sc->num_replies + 1), 16);
  582         sc->pqdepth = roundup2((sc->num_replies + 1), 16);
  583         fqsize= sc->fqdepth * 4;
  584         pqsize = sc->pqdepth * 8;
  585         qsize = fqsize + pqsize;
  586 
  587         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  588                                 16, 0,                  /* algnmnt, boundary */
  589                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  590                                 BUS_SPACE_MAXADDR,      /* highaddr */
  591                                 NULL, NULL,             /* filter, filterarg */
  592                                 qsize,                  /* maxsize */
  593                                 1,                      /* nsegments */
  594                                 qsize,                  /* maxsegsize */
  595                                 0,                      /* flags */
  596                                 NULL, NULL,             /* lockfunc, lockarg */
  597                                 &sc->queues_dmat)) {
  598                 device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n");
  599                 return (ENOMEM);
  600         }
  601         if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
  602             &sc->queues_map)) {
  603                 device_printf(sc->mps_dev, "Cannot allocate queues memory\n");
  604                 return (ENOMEM);
  605         }
  606         bzero(queues, qsize);
  607         bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
  608             mps_memaddr_cb, &queues_busaddr, 0);
  609 
  610         sc->free_queue = (uint32_t *)queues;
  611         sc->free_busaddr = queues_busaddr;
  612         sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
  613         sc->post_busaddr = queues_busaddr + fqsize;
  614 
  615         return (0);
  616 }
  617 
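A sizing sketch for the queue layout described above, assuming (hypothetically) that the IOC grants sc->num_replies == 1023:

/*
 * Hypothetical sizing, assuming sc->num_replies == 1023:
 *
 *   fqdepth = roundup2(1023 + 1, 16) = 1024   reply free queue entries
 *   pqdepth = roundup2(1023 + 1, 16) = 1024   reply post queue entries
 *   fqsize  = 1024 * 4 = 4096 bytes           (4-byte free queue entries)
 *   pqsize  = 1024 * 8 = 8192 bytes           (8-byte post descriptors)
 *   qsize   = 12288 bytes in a single 16-byte aligned, 32-bit addressable
 *             DMA allocation, carved as:
 *
 *   free_queue = queues          free_busaddr = queues_busaddr
 *   post_queue = queues + 4096   post_busaddr = queues_busaddr + 4096
 */
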
  618 static int
  619 mps_alloc_replies(struct mps_softc *sc)
  620 {
  621         int rsize, num_replies;
  622 
  623         /*
  624          * sc->num_replies should be one less than sc->fqdepth.  We need to
  625          * allocate space for sc->fqdepth replies, but only sc->num_replies
  626          * replies can be used at once.
  627          */
  628         num_replies = max(sc->fqdepth, sc->num_replies);
  629 
  630         rsize = sc->facts->ReplyFrameSize * num_replies * 4; 
  631         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  632                                 4, 0,                   /* algnmnt, boundary */
  633                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  634                                 BUS_SPACE_MAXADDR,      /* highaddr */
  635                                 NULL, NULL,             /* filter, filterarg */
  636                                 rsize,                  /* maxsize */
  637                                 1,                      /* nsegments */
  638                                 rsize,                  /* maxsegsize */
  639                                 0,                      /* flags */
  640                                 NULL, NULL,             /* lockfunc, lockarg */
  641                                 &sc->reply_dmat)) {
  642                 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n");
  643                 return (ENOMEM);
  644         }
  645         if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
  646             BUS_DMA_NOWAIT, &sc->reply_map)) {
  647                 device_printf(sc->mps_dev, "Cannot allocate replies memory\n");
  648                 return (ENOMEM);
  649         }
  650         bzero(sc->reply_frames, rsize);
  651         bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
  652             mps_memaddr_cb, &sc->reply_busaddr, 0);
  653 
  654         return (0);
  655 }
  656 
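Continuing the same hypothetical numbers, the reply frame pool allocated above works out as follows (ReplyFrameSize is reported by the IOC in 32-bit words; 32 is assumed here only for illustration):

/*
 * Hypothetical sizing, assuming sc->facts->ReplyFrameSize == 32 (32-bit
 * words, i.e. 128 bytes per frame) and sc->fqdepth == 1024:
 *
 *   num_replies = max(1024, 1023) = 1024 frames allocated
 *   rsize       = 32 * 1024 * 4   = 131072 bytes of reply frames,
 *                                   of which only 1023 are usable at once
 */
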
  657 static int
  658 mps_alloc_requests(struct mps_softc *sc)
  659 {
  660         struct mps_command *cm;
  661         struct mps_chain *chain;
  662         int i, rsize, nsegs;
  663 
  664         rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
  665         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  666                                 16, 0,                  /* algnmnt, boundary */
  667                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  668                                 BUS_SPACE_MAXADDR,      /* highaddr */
  669                                 NULL, NULL,             /* filter, filterarg */
  670                                 rsize,                  /* maxsize */
  671                                 1,                      /* nsegments */
  672                                 rsize,                  /* maxsegsize */
  673                                 0,                      /* flags */
  674                                 NULL, NULL,             /* lockfunc, lockarg */
  675                                 &sc->req_dmat)) {
  676                 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n");
  677                 return (ENOMEM);
  678         }
  679         if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
  680             BUS_DMA_NOWAIT, &sc->req_map)) {
  681                 device_printf(sc->mps_dev, "Cannot allocate request memory\n");
  682                 return (ENOMEM);
  683         }
  684         bzero(sc->req_frames, rsize);
  685         bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
  686             mps_memaddr_cb, &sc->req_busaddr, 0);
  687 
  688         rsize = sc->facts->IOCRequestFrameSize * MPS_CHAIN_FRAMES * 4;
  689         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  690                                 16, 0,                  /* algnmnt, boundary */
  691                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  692                                 BUS_SPACE_MAXADDR,      /* highaddr */
  693                                 NULL, NULL,             /* filter, filterarg */
  694                                 rsize,                  /* maxsize */
  695                                 1,                      /* nsegments */
  696                                 rsize,                  /* maxsegsize */
  697                                 0,                      /* flags */
  698                                 NULL, NULL,             /* lockfunc, lockarg */
  699                                 &sc->chain_dmat)) {
  700                 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n");
  701                 return (ENOMEM);
  702         }
  703         if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
  704             BUS_DMA_NOWAIT, &sc->chain_map)) {
  705                 device_printf(sc->mps_dev, "Cannot allocate chain memory\n");
  706                 return (ENOMEM);
  707         }
  708         bzero(sc->chain_frames, rsize);
  709         bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
  710             mps_memaddr_cb, &sc->chain_busaddr, 0);
  711 
  712         rsize = MPS_SENSE_LEN * sc->num_reqs;
  713         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  714                                 1, 0,                   /* algnmnt, boundary */
  715                                 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
  716                                 BUS_SPACE_MAXADDR,      /* highaddr */
  717                                 NULL, NULL,             /* filter, filterarg */
  718                                 rsize,                  /* maxsize */
  719                                 1,                      /* nsegments */
  720                                 rsize,                  /* maxsegsize */
  721                                 0,                      /* flags */
  722                                 NULL, NULL,             /* lockfunc, lockarg */
  723                                 &sc->sense_dmat)) {
  724                 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n");
  725                 return (ENOMEM);
  726         }
  727         if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
  728             BUS_DMA_NOWAIT, &sc->sense_map)) {
  729                 device_printf(sc->mps_dev, "Cannot allocate sense memory\n");
  730                 return (ENOMEM);
  731         }
  732         bzero(sc->sense_frames, rsize);
  733         bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
  734             mps_memaddr_cb, &sc->sense_busaddr, 0);
  735 
  736         sc->chains = malloc(sizeof(struct mps_chain) * MPS_CHAIN_FRAMES,
  737             M_MPT2, M_WAITOK | M_ZERO);
  738         for (i = 0; i < MPS_CHAIN_FRAMES; i++) {
  739                 chain = &sc->chains[i];
  740                 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
  741                     i * sc->facts->IOCRequestFrameSize * 4);
  742                 chain->chain_busaddr = sc->chain_busaddr +
  743                     i * sc->facts->IOCRequestFrameSize * 4;
  744                 mps_free_chain(sc, chain);
  745                 sc->chain_free_lowwater++;
  746         }
  747 
  748         /* XXX Need to pick a more precise value */
  749         nsegs = (MAXPHYS / PAGE_SIZE) + 1;
  750         if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
  751                                 1, 0,                   /* algnmnt, boundary */
  752                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  753                                 BUS_SPACE_MAXADDR,      /* highaddr */
  754                                 NULL, NULL,             /* filter, filterarg */
  755                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  756                                 nsegs,                  /* nsegments */
  757                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  758                                 BUS_DMA_ALLOCNOW,       /* flags */
  759                                 busdma_lock_mutex,      /* lockfunc */
  760                                 &sc->mps_mtx,           /* lockarg */
  761                                 &sc->buffer_dmat)) {
   762                 device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n");
  763                 return (ENOMEM);
  764         }
  765 
  766         /*
  767          * SMID 0 cannot be used as a free command per the firmware spec.
  768          * Just drop that command instead of risking accounting bugs.
  769          */
  770         sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs,
  771             M_MPT2, M_WAITOK | M_ZERO);
  772         for (i = 1; i < sc->num_reqs; i++) {
  773                 cm = &sc->commands[i];
  774                 cm->cm_req = sc->req_frames +
  775                     i * sc->facts->IOCRequestFrameSize * 4;
  776                 cm->cm_req_busaddr = sc->req_busaddr +
  777                     i * sc->facts->IOCRequestFrameSize * 4;
  778                 cm->cm_sense = &sc->sense_frames[i];
  779                 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
  780                 cm->cm_desc.Default.SMID = i;
  781                 cm->cm_sc = sc;
  782                 TAILQ_INIT(&cm->cm_chain_list);
  783                 callout_init(&cm->cm_callout, 1 /*MPSAFE*/);
  784 
  785                 /* XXX Is a failure here a critical problem? */
  786                 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
  787                         mps_free_command(sc, cm);
  788                 else {
  789                         sc->num_reqs = i;
  790                         break;
  791                 }
  792         }
  793 
  794         return (0);
  795 }
  796 
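How the request, sense, and chain pools are carved into per-command slices, again with a hypothetical IOCRequestFrameSize of 32 (32-bit words, i.e. 128 bytes per request frame):

/*
 * Hypothetical carving, assuming sc->facts->IOCRequestFrameSize == 32:
 *
 *   command i (SMID == i, 1 <= i < num_reqs):
 *     cm_req           = req_frames    + i * 128
 *     cm_req_busaddr   = req_busaddr   + i * 128
 *     cm_sense         = &sense_frames[i]
 *     cm_sense_busaddr = sense_busaddr + i * MPS_SENSE_LEN
 *
 *   chain j (0 <= j < MPS_CHAIN_FRAMES):
 *     chain            = chain_frames  + j * 128
 *     chain_busaddr    = chain_busaddr + j * 128
 *
 * Request frame 0 (SMID 0) is allocated but never handed out, per the
 * firmware restriction noted above.
 */
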
  797 static int
  798 mps_init_queues(struct mps_softc *sc)
  799 {
  800         int i;
  801 
  802         memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
  803 
  804         /*
  805          * According to the spec, we need to use one less reply than we
  806          * have space for on the queue.  So sc->num_replies (the number we
  807          * use) should be less than sc->fqdepth (allocated size).
  808          */
  809         if (sc->num_replies >= sc->fqdepth)
  810                 return (EINVAL);
  811 
  812         /*
  813          * Initialize all of the free queue entries.
  814          */
  815         for (i = 0; i < sc->fqdepth; i++)
  816                 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4);
  817         sc->replyfreeindex = sc->num_replies;
  818 
  819         return (0);
  820 }
  821 
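A sketch of the index bookkeeping after initialization, using the same hypothetical depths as above:

/*
 * Hypothetical state after mps_init_queues(), with fqdepth == 1024 and
 * num_replies == 1023:
 *
 *   free_queue[0..1023]   each preloaded with a reply frame bus address
 *   replyfreeindex = 1023 (host index for the reply free queue)
 *   replypostindex = 0    (host index for the reply post queue)
 *
 * The IOC treats a queue as full when its index and the host index hold
 * the same value, which is why only num_replies (fqdepth - 1) replies
 * can ever be outstanding at once.
 */
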
  822 int
  823 mps_attach(struct mps_softc *sc)
  824 {
  825         int i, error;
  826         char tmpstr[80], tmpstr2[80];
  827 
  828         /*
  829          * Grab any tunable-set debug level so that tracing works as early
  830          * as possible.
  831          */
  832         snprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.debug_level",
  833             device_get_unit(sc->mps_dev));
  834         TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug);
  835         snprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.allow_multiple_tm_cmds",
  836             device_get_unit(sc->mps_dev));
  837         TUNABLE_INT_FETCH(tmpstr, &sc->allow_multiple_tm_cmds);
  838 
  839         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
  840 
  841         mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
  842         callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
  843         TAILQ_INIT(&sc->event_list);
  844 
  845         /*
  846          * Setup the sysctl variable so the user can change the debug level
  847          * on the fly.
  848          */
  849         snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d",
  850             device_get_unit(sc->mps_dev));
  851         snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
  852 
  853         sysctl_ctx_init(&sc->sysctl_ctx);
  854         sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
  855             SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, CTLFLAG_RD,
  856             0, tmpstr);
  857         if (sc->sysctl_tree == NULL)
  858                 return (ENOMEM);
  859 
  860         SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  861             OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0,
  862             "mps debug level");
  863 
  864         SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  865             OID_AUTO, "allow_multiple_tm_cmds", CTLFLAG_RW,
  866             &sc->allow_multiple_tm_cmds, 0,
  867             "allow multiple simultaneous task management cmds");
  868 
  869         SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  870             OID_AUTO, "io_cmds_active", CTLFLAG_RD,
  871             &sc->io_cmds_active, 0, "number of currently active commands");
  872 
  873         SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  874             OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
  875             &sc->io_cmds_highwater, 0, "maximum active commands seen");
  876 
  877         SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  878             OID_AUTO, "chain_free", CTLFLAG_RD,
  879             &sc->chain_free, 0, "number of free chain elements");
  880 
  881         SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  882             OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
  883             &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
  884 
  885         SYSCTL_ADD_UQUAD(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  886             OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
  887             &sc->chain_alloc_fail, "chain allocation failures");
  888 
  889         if ((error = mps_transition_ready(sc)) != 0)
  890                 return (error);
  891 
  892         sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
  893             M_ZERO|M_NOWAIT);
  894         if ((error = mps_get_iocfacts(sc, sc->facts)) != 0)
  895                 return (error);
  896 
  897         mps_print_iocfacts(sc, sc->facts);
  898 
  899         mps_printf(sc, "Firmware: %02d.%02d.%02d.%02d\n",
  900             sc->facts->FWVersion.Struct.Major,
  901             sc->facts->FWVersion.Struct.Minor,
  902             sc->facts->FWVersion.Struct.Unit,
  903             sc->facts->FWVersion.Struct.Dev);
  904         mps_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
  905             "\2" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
  906             "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
  907             "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
  908 
  909         /*
  910          * If the chip doesn't support event replay then a hard reset will be
  911          * required to trigger a full discovery.  Do the reset here then
  912          * retransition to Ready.  A hard reset might have already been done,
  913          * but it doesn't hurt to do it again.
  914          */
  915         if ((sc->facts->IOCCapabilities &
  916             MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
  917                 mps_hard_reset(sc);
  918                 if ((error = mps_transition_ready(sc)) != 0)
  919                         return (error);
  920         }
  921 
  922         /*
  923          * Size the queues. Since the reply queues always need one free entry,
  924          * we'll just deduct one reply message here.
  925          */
  926         sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit);
  927         sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES,
  928             sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
  929         mps_dprint(sc, MPS_INFO, "num_reqs %d, num_replies %d\n", sc->num_reqs,
  930                    sc->num_replies);
  931         TAILQ_INIT(&sc->req_list);
  932         TAILQ_INIT(&sc->chain_list);
  933         TAILQ_INIT(&sc->tm_list);
  934         TAILQ_INIT(&sc->io_list);
  935 
  936         if (((error = mps_alloc_queues(sc)) != 0) ||
  937             ((error = mps_alloc_replies(sc)) != 0) ||
  938             ((error = mps_alloc_requests(sc)) != 0)) {
  939                 mps_free(sc);
  940                 return (error);
  941         }
  942 
  943         if (((error = mps_init_queues(sc)) != 0) ||
  944             ((error = mps_transition_operational(sc)) != 0)) {
  945                 mps_free(sc);
  946                 return (error);
  947         }
  948 
  949         /*
  950          * Finish the queue initialization.
  951          * These are set here instead of in mps_init_queues() because the
  952          * IOC resets these values during the state transition in
   953          * mps_transition_operational().  The free index is written ahead
   954          * of the corresponding index in the IOC (which starts at 0), and the
  955          * IOC treats the queues as full if both are set to the same value.
  956          * Hence the reason that the queue can't hold all of the possible
  957          * replies.
  958          */
  959         sc->replypostindex = 0;
  960         mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
  961         mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
  962 
  963         sc->pfacts = malloc(sizeof(MPI2_PORT_FACTS_REPLY) *
  964             sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK);
  965         for (i = 0; i < sc->facts->NumberOfPorts; i++) {
  966                 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) {
  967                         mps_free(sc);
  968                         return (error);
  969                 }
  970                 mps_print_portfacts(sc, &sc->pfacts[i]);
  971         }
  972 
  973         /* Attach the subsystems so they can prepare their event masks. */
  974         /* XXX Should be dynamic so that IM/IR and user modules can attach */
  975         if (((error = mps_attach_log(sc)) != 0) ||
  976             ((error = mps_attach_sas(sc)) != 0) ||
  977             ((error = mps_attach_user(sc)) != 0)) {
  978                 mps_printf(sc, "%s failed to attach all subsystems: error %d\n",
  979                     __func__, error);
  980                 mps_free(sc);
  981                 return (error);
  982         }
  983 
  984         if ((error = mps_pci_setup_interrupts(sc)) != 0) {
  985                 mps_free(sc);
  986                 return (error);
  987         }
  988 
  989         /* Start the periodic watchdog check on the IOC Doorbell */
  990         mps_periodic(sc);
  991 
  992         /*
  993          * The portenable will kick off discovery events that will drive the
  994          * rest of the initialization process.  The CAM/SAS module will
  995          * hold up the boot sequence until discovery is complete.
  996          */
  997         sc->mps_ich.ich_func = mps_startup;
  998         sc->mps_ich.ich_arg = sc;
  999         if (config_intrhook_establish(&sc->mps_ich) != 0) {
 1000                 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n");
 1001                 error = EINVAL;
 1002         }
 1003 
 1004         sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
 1005 
 1006         return (error);
 1007 }
 1008 
 1009 static void
 1010 mps_startup(void *arg)
 1011 {
 1012         struct mps_softc *sc;
 1013 
 1014         sc = (struct mps_softc *)arg;
 1015 
 1016         mps_lock(sc);
 1017         mps_unmask_intr(sc);
 1018         mps_send_portenable(sc);
 1019         mps_unlock(sc);
 1020 }
 1021 
 1022 /* Periodic watchdog.  Is called with the driver lock already held. */
 1023 static void
 1024 mps_periodic(void *arg)
 1025 {
 1026         struct mps_softc *sc;
 1027         uint32_t db;
 1028 
 1029         sc = (struct mps_softc *)arg;
 1030         if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
 1031                 return;
 1032 
 1033         db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
 1034         if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 1035                 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db);
 1036                 /* XXX Need to broaden this to re-initialize the chip */
 1037                 mps_hard_reset(sc);
 1038                 db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
 1039                 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 1040                         device_printf(sc->mps_dev, "Second IOC Fault 0x%08x, "
 1041                             "Giving up!\n", db);
 1042                         return;
 1043                 }
 1044         }
 1045 
 1046         callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc);
 1047 }
 1048 
 1049 static void
 1050 mps_startup_complete(struct mps_softc *sc, struct mps_command *cm)
 1051 {
 1052         MPI2_PORT_ENABLE_REPLY *reply;
 1053 
 1054         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1055 
 1056         reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
 1057         if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
 1058                 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
 1059 
 1060         mps_free_command(sc, cm);
 1061         config_intrhook_disestablish(&sc->mps_ich);
 1062 
 1063 }
 1064 
 1065 static void
 1066 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
 1067     MPI2_EVENT_NOTIFICATION_REPLY *event)
 1068 {
 1069         MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
 1070 
 1071         mps_print_event(sc, event);
 1072 
 1073         switch (event->Event) {
 1074         case MPI2_EVENT_LOG_DATA:
 1075                 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n");
 1076                 hexdump(event->EventData, event->EventDataLength, NULL, 0);
 1077                 break;
 1078         case MPI2_EVENT_LOG_ENTRY_ADDED:
 1079                 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
 1080                 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event "
 1081                     "0x%x Sequence %d:\n", entry->LogEntryQualifier,
 1082                      entry->LogSequence);
 1083                 break;
 1084         default:
 1085                 break;
 1086         }
 1087         return;
 1088 }
 1089 
 1090 static int
 1091 mps_attach_log(struct mps_softc *sc)
 1092 {
 1093         uint8_t events[16];
 1094 
 1095         bzero(events, 16);
 1096         setbit(events, MPI2_EVENT_LOG_DATA);
 1097         setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
 1098 
 1099         mps_register_events(sc, events, mps_log_evt_handler, NULL,
 1100             &sc->mps_log_eh);
 1101 
 1102         return (0);
 1103 }
 1104 
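The event mask passed to mps_register_events() is a 128-bit bitmap indexed by MPI2 event code; setbit() marks byte (event / 8), bit (event % 8). A hypothetical additional subsystem would register the same way (the xyz names below are illustrative only and not part of this driver):

static int
mps_attach_xyz(struct mps_softc *sc)
{
        uint8_t events[16];     /* 128-bit MPI2 event mask */

        bzero(events, 16);
        /* mark the events this subsystem wants delivered to its handler */
        setbit(events, MPI2_EVENT_SAS_DISCOVERY);

        mps_register_events(sc, events, mps_xyz_evt_handler, NULL,
            &sc->mps_xyz_eh);

        return (0);
}
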
 1105 static int
 1106 mps_detach_log(struct mps_softc *sc)
 1107 {
 1108 
 1109         if (sc->mps_log_eh != NULL)
 1110                 mps_deregister_events(sc, sc->mps_log_eh);
 1111         return (0);
 1112 }
 1113 
 1114 /*
 1115  * Free all of the driver resources and detach submodules.  Should be called
 1116  * without the lock held.
 1117  */
 1118 int
 1119 mps_free(struct mps_softc *sc)
 1120 {
 1121         struct mps_command *cm;
 1122         int i, error;
 1123 
 1124         /* Turn off the watchdog */
 1125         mps_lock(sc);
 1126         sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
 1127         mps_unlock(sc);
 1128         /* Lock must not be held for this */
 1129         callout_drain(&sc->periodic);
 1130 
 1131         if (((error = mps_detach_log(sc)) != 0) ||
 1132             ((error = mps_detach_sas(sc)) != 0))
 1133                 return (error);
 1134 
 1135         /* Put the IOC back in the READY state. */
 1136         mps_lock(sc);
 1137         if ((error = mps_send_mur(sc)) != 0) {
 1138                 mps_unlock(sc);
 1139                 return (error);
 1140         }
 1141         mps_unlock(sc);
 1142 
 1143         if (sc->facts != NULL)
 1144                 free(sc->facts, M_MPT2);
 1145 
 1146         if (sc->pfacts != NULL)
 1147                 free(sc->pfacts, M_MPT2);
 1148 
 1149         if (sc->post_busaddr != 0)
 1150                 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
 1151         if (sc->post_queue != NULL)
 1152                 bus_dmamem_free(sc->queues_dmat, sc->post_queue,
 1153                     sc->queues_map);
 1154         if (sc->queues_dmat != NULL)
 1155                 bus_dma_tag_destroy(sc->queues_dmat);
 1156 
 1157         if (sc->chain_busaddr != 0)
 1158                 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
 1159         if (sc->chain_frames != NULL)
 1160                 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map);
 1161         if (sc->chain_dmat != NULL)
 1162                 bus_dma_tag_destroy(sc->chain_dmat);
 1163 
 1164         if (sc->sense_busaddr != 0)
 1165                 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
 1166         if (sc->sense_frames != NULL)
 1167                 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map);
 1168         if (sc->sense_dmat != NULL)
 1169                 bus_dma_tag_destroy(sc->sense_dmat);
 1170 
 1171         if (sc->reply_busaddr != 0)
 1172                 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
 1173         if (sc->reply_frames != NULL)
 1174                 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map);
 1175         if (sc->reply_dmat != NULL)
 1176                 bus_dma_tag_destroy(sc->reply_dmat);
 1177 
 1178         if (sc->req_busaddr != 0)
 1179                 bus_dmamap_unload(sc->req_dmat, sc->req_map);
 1180         if (sc->req_frames != NULL)
 1181                 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
 1182         if (sc->req_dmat != NULL)
 1183                 bus_dma_tag_destroy(sc->req_dmat);
 1184 
 1185         if (sc->chains != NULL)
 1186                 free(sc->chains, M_MPT2);
 1187         if (sc->commands != NULL) {
 1188                 for (i = 1; i < sc->num_reqs; i++) {
 1189                         cm = &sc->commands[i];
 1190                         bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
 1191                 }
 1192                 free(sc->commands, M_MPT2);
 1193         }
 1194         if (sc->buffer_dmat != NULL)
 1195                 bus_dma_tag_destroy(sc->buffer_dmat);
 1196 
 1197         if (sc->sysctl_tree != NULL)
 1198                 sysctl_ctx_free(&sc->sysctl_ctx);
 1199 
 1200         mtx_destroy(&sc->mps_mtx);
 1201 
 1202         return (0);
 1203 }
 1204 
 1205 static __inline void
 1206 mps_complete_command(struct mps_command *cm)
 1207 {
 1208         if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
 1209                 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
 1210 
 1211         if (cm->cm_complete != NULL)
 1212                 cm->cm_complete(cm->cm_sc, cm);
 1213 
 1214         if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
 1215                 mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n",
 1216                            __func__, cm);
 1217                 wakeup(cm);
 1218         }
 1219 }
 1220 
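mps_complete_command() serves three kinds of callers: polled commands (MPS_CM_FLAGS_POLLED), callback-driven commands (cm_complete), and sleepers (MPS_CM_FLAGS_WAKEUP). A minimal sketch of a sleeping caller, hypothetical and not taken from this file, run with the driver lock held:

        cm->cm_complete = NULL;
        cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
        mps_map_command(sc, cm);
        /* mps_complete_command() will wakeup(cm) from the interrupt path */
        error = msleep(cm, &sc->mps_mtx, 0, "mpswait", 30 * hz);
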
 1221 void
 1222 mps_intr(void *data)
 1223 {
 1224         struct mps_softc *sc;
 1225         uint32_t status;
 1226 
 1227         sc = (struct mps_softc *)data;
 1228         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1229 
 1230         /*
 1231          * Check interrupt status register to flush the bus.  This is
 1232          * needed for both INTx interrupts and driver-driven polling
 1233          */
 1234         status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
 1235         if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
 1236                 return;
 1237 
 1238         mps_lock(sc);
 1239         mps_intr_locked(data);
 1240         mps_unlock(sc);
 1241         return;
 1242 }
 1243 
 1244 /*
 1245  * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
 1246  * chip.  Hopefully this theory is correct.
 1247  */
 1248 void
 1249 mps_intr_msi(void *data)
 1250 {
 1251         struct mps_softc *sc;
 1252 
 1253         sc = (struct mps_softc *)data;
 1254         mps_lock(sc);
 1255         mps_intr_locked(data);
 1256         mps_unlock(sc);
 1257         return;
 1258 }
 1259 
 1260 /*
 1261  * The locking is overly broad and simplistic, but easy to deal with for now.
 1262  */
 1263 void
 1264 mps_intr_locked(void *data)
 1265 {
 1266         MPI2_REPLY_DESCRIPTORS_UNION *desc;
 1267         struct mps_softc *sc;
 1268         struct mps_command *cm = NULL;
 1269         uint8_t flags;
 1270         u_int pq;
 1271 
 1272         sc = (struct mps_softc *)data;
 1273 
 1274         pq = sc->replypostindex;
 1275 
 1276         for ( ;; ) {
 1277                 cm = NULL;
 1278                 desc = &sc->post_queue[pq];
 1279                 flags = desc->Default.ReplyFlags &
 1280                     MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 1281                 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 1282                  || (desc->Words.High == 0xffffffff))
 1283                         break;
 1284 
 1285                 switch (flags) {
 1286                 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
 1287                         cm = &sc->commands[desc->SCSIIOSuccess.SMID];
 1288                         cm->cm_reply = NULL;
 1289                         break;
 1290                 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
 1291                 {
 1292                         uint32_t baddr;
 1293                         uint8_t *reply;
 1294 
 1295                         /*
 1296                          * Re-compose the reply address from the address
 1297                          * sent back from the chip.  The ReplyFrameAddress
 1298                          * is the lower 32 bits of the physical address of
 1299                          * particular reply frame.  Convert that address to
 1300                          * host format, and then use that to provide the
 1301                          * offset against the virtual address base
 1302                          * (sc->reply_frames).
 1303                          */
 1304                         baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
 1305                         reply = sc->reply_frames +
 1306                                 (baddr - ((uint32_t)sc->reply_busaddr));
 1307                         /*
 1308                          * Make sure the reply we got back is in a valid
 1309                          * range.  If not, go ahead and panic here, since
 1310                          * we'll probably panic as soon as we dereference the
 1311                          * reply pointer anyway.
 1312                          */
 1313                         if ((reply < sc->reply_frames)
 1314                          || (reply > (sc->reply_frames +
 1315                              (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
 1316                                 printf("%s: WARNING: reply %p out of range!\n",
 1317                                        __func__, reply);
 1318                                 printf("%s: reply_frames %p, fqdepth %d, "
 1319                                        "frame size %d\n", __func__,
 1320                                        sc->reply_frames, sc->fqdepth,
 1321                                        sc->facts->ReplyFrameSize * 4);
 1322                                printf("%s: baddr %#x\n", __func__, baddr);
 1323                                 panic("Reply address out of range");
 1324                         }
 1325                         if (desc->AddressReply.SMID == 0) {
 1326                                 mps_dispatch_event(sc, baddr,
 1327                                    (MPI2_EVENT_NOTIFICATION_REPLY *) reply);
 1328                         } else {
 1329                                 cm = &sc->commands[desc->AddressReply.SMID];
 1330                                 cm->cm_reply = reply;
 1331                                 cm->cm_reply_data =
 1332                                     desc->AddressReply.ReplyFrameAddress;
 1333                         }
 1334                         break;
 1335                 }
 1336                 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
 1337                 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
 1338                 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
 1339                 default:
 1340                         /* Unhandled */
 1341                         device_printf(sc->mps_dev, "Unhandled reply 0x%x\n",
 1342                             desc->Default.ReplyFlags);
 1343                         cm = NULL;
 1344                         break;
 1345                 }
 1346 
 1347                 if (cm != NULL)
 1348                         mps_complete_command(cm);
 1349 
 1350                 desc->Words.Low = 0xffffffff;
 1351                 desc->Words.High = 0xffffffff;
 1352                 if (++pq >= sc->pqdepth)
 1353                         pq = 0;
 1354         }
 1355 
 1356         if (pq != sc->replypostindex) {
 1357                 mps_dprint(sc, MPS_INFO, "writing postindex %d\n", pq);
 1358                 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, pq);
 1359                 sc->replypostindex = pq;
 1360         }
 1361 
 1362         return;
 1363 }
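
      /*
       * Worked example for the address-reply path above (hypothetical
       * numbers, not from the original source): if sc->reply_busaddr is
       * 0x40000000 and the chip returns ReplyFrameAddress 0x40000100, the
       * reply frame lives at sc->reply_frames + 0x100 in host virtual
       * memory.  The range check guards against a corrupted descriptor
       * pointing outside the reply pool, whose size is fqdepth frames of
       * ReplyFrameSize 32-bit words (hence the multiply by 4 for bytes).
       */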
 1364 
 1365 static void
 1366 mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
 1367     MPI2_EVENT_NOTIFICATION_REPLY *reply)
 1368 {
 1369         struct mps_event_handle *eh;
 1370         int event, handled = 0;
 1371 
 1372         event = reply->Event;
 1373         TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
 1374                 if (isset(eh->mask, event)) {
 1375                         eh->callback(sc, data, reply);
 1376                         handled++;
 1377                 }
 1378         }
 1379 
 1380         if (handled == 0)
 1381                 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event);
 1382 }
 1383 
 1384 /*
 1385  * For both register_events and update_events, the caller supplies a bitmap
 1386  * of events that it _wants_.  These functions then turn that into a bitmask
 1387  * suitable for the controller.
 1388  */
 1389 int
 1390 mps_register_events(struct mps_softc *sc, uint8_t *mask,
 1391     mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle)
 1392 {
 1393         struct mps_event_handle *eh;
 1394         int error = 0;
 1395 
 1396         eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO);
 1397         eh->callback = cb;
 1398         eh->data = data;
 1399         TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
 1400         if (mask != NULL)
 1401                 error = mps_update_events(sc, eh, mask);
 1402         *handle = eh;
 1403 
 1404         return (error);
 1405 }
 1406 
 1407 int
 1408 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
 1409     uint8_t *mask)
 1410 {
 1411         MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
 1412         MPI2_EVENT_NOTIFICATION_REPLY *reply;
 1413         struct mps_command *cm;
 1414         struct mps_event_handle *eh;
 1415         int error, i;
 1416 
 1417         mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
 1418 
 1419         if ((mask != NULL) && (handle != NULL))
 1420                 bcopy(mask, &handle->mask[0], 16);
 1421         memset(sc->event_mask, 0xff, 16);
 1422 
 1423         TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
 1424                 for (i = 0; i < 16; i++)
 1425                         sc->event_mask[i] &= ~eh->mask[i];
 1426         }
 1427 
 1428         if ((cm = mps_alloc_command(sc)) == NULL)
 1429                 return (EBUSY);
 1430         evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
 1431         evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
 1432         evtreq->MsgFlags = 0;
 1433         evtreq->SASBroadcastPrimitiveMasks = 0;
 1434 #ifdef MPS_DEBUG_ALL_EVENTS
 1435         {
 1436                 u_char fullmask[16];
 1437                 memset(fullmask, 0x00, 16);
 1438                 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
 1439         }
 1440 #else
 1441                 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
 1442 #endif
 1443         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 1444         cm->cm_data = NULL;
 1445 
 1446         error = mps_request_polled(sc, cm);
 1447         reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
 1448         if (reply == NULL || (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
 1449                 error = ENXIO;
 1450         if (reply != NULL) mps_print_event(sc, reply);
 1451 
 1452         mps_free_command(sc, cm);
 1453         return (error);
 1454 }
 1455 
 1456 int
 1457 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
 1458 {
 1459 
 1460         TAILQ_REMOVE(&sc->event_list, handle, eh_list);
 1461         free(handle, M_MPT2);
 1462         return (mps_update_events(sc, NULL, NULL));
 1463 }
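
      /*
       * Illustrative sketch of how a consumer might use this event
       * interface (hypothetical callback and variable names; not part of
       * the original driver).  The mask is a 16-byte bitmap indexed by
       * MPI2 event number, which can be built with setbit():
       *
       *      static void
       *      my_evt_cb(struct mps_softc *sc, uintptr_t data,
       *          MPI2_EVENT_NOTIFICATION_REPLY *reply)
       *      {
       *              mps_dprint(sc, MPS_INFO, "event 0x%x\n", reply->Event);
       *      }
       *
       *      uint8_t mask[16];
       *      struct mps_event_handle *handle;
       *
       *      bzero(mask, sizeof(mask));
       *      setbit(mask, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
       *      mps_register_events(sc, mask, my_evt_cb, NULL, &handle);
       *      ...
       *      mps_deregister_events(sc, handle);
       */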
 1464 
 1465 /*
 1466  * Add a chain element as the next SGE for the specified command.
 1467  * Reset cm_sge and cm_sgesize to indicate all the available space.
 1468  */
 1469 static int
 1470 mps_add_chain(struct mps_command *cm)
 1471 {
 1472         MPI2_SGE_CHAIN32 *sgc;
 1473         struct mps_chain *chain;
 1474         int space;
 1475 
 1476         if (cm->cm_sglsize < MPS_SGC_SIZE)
 1477                 panic("MPS: Need SGE Error Code\n");
 1478 
 1479         chain = mps_alloc_chain(cm->cm_sc);
 1480         if (chain == NULL)
 1481                 return (ENOBUFS);
 1482 
 1483         space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
 1484 
 1485         /*
 1486          * Note: a doubly-linked list is used to make it easier to
 1487          * walk for debugging.
 1488          */
 1489         TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
 1490 
 1491         sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain;
 1492         sgc->Length = space;
 1493         sgc->NextChainOffset = 0;
 1494         sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT;
 1495         sgc->Address = chain->chain_busaddr;
 1496 
 1497         cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
 1498         cm->cm_sglsize = space;
 1499         return (0);
 1500 }
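
      /*
       * Worked example (hypothetical sizes, not from the original source):
       * an IOCRequestFrameSize of 32 dwords gives 128 bytes of space per
       * chain frame (hence the multiply by 4 above).  Assuming
       * MPS_SGE64_SIZE is 12 bytes and MPS_SGC_SIZE is 8 bytes, that is
       * room for ten 64-bit simple SGEs plus 8 bytes left over for a
       * further chain element if the list still does not fit.
       */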
 1501 
 1502 /*
 1503  * Add one scatter-gather element (chain, simple, transaction context)
 1504  * to the scatter-gather list for a command.  Maintain cm_sglsize and
 1505  * cm_sge as the remaining size and pointer to the next SGE to fill
 1506  * in, respectively.
 1507  */
 1508 int
 1509 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
 1510 {
 1511         MPI2_SGE_TRANSACTION_UNION *tc = sgep;
 1512         MPI2_SGE_SIMPLE64 *sge = sgep;
 1513         int error, type;
 1514 
 1515         type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
 1516 
 1517 #ifdef INVARIANTS
 1518         switch (type) {
 1519         case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: {
 1520                 if (len != tc->DetailsLength + 4)
 1521                         panic("TC %p length %u or %zu?", tc,
 1522                             tc->DetailsLength + 4, len);
 1523                 }
 1524                 break;
 1525         case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
 1526                 /* Driver only uses 32-bit chain elements */
 1527                 if (len != MPS_SGC_SIZE)
 1528                         panic("CHAIN %p length %u or %zu?", sgep,
 1529                             MPS_SGC_SIZE, len);
 1530                 break;
 1531         case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
 1532                 /* Driver only uses 64-bit SGE simple elements */
 1533                 sge = sgep;
 1534                 if (len != MPS_SGE64_SIZE)
 1535                         panic("SGE simple %p length %u or %zu?", sge,
 1536                             MPS_SGE64_SIZE, len);
 1537                 if (((sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT) &
 1538                     MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0)
 1539                         panic("SGE simple %p flags %02x not marked 64-bit?",
 1540                             sge, sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
 1541 
 1542                 break;
 1543         default:
 1544                 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
 1545         }
 1546 #endif
 1547 
 1548         /*
 1549          * case 1: 1 more segment, enough room for it
 1550          * case 2: 2 more segments, enough room for both
 1551          * case 3: >=2 more segments, only enough room for 1 and a chain
 1552          * case 4: >=1 more segment, enough room for only a chain
 1553          * case 5: >=1 more segment, no room for anything (error)
 1554          */
 1555 
 1556         /*
 1557          * There should be room for at least a chain element, or this
 1558          * code is buggy.  Case (5).
 1559          */
 1560         if (cm->cm_sglsize < MPS_SGC_SIZE)
 1561                 panic("MPS: Need SGE Error Code\n");
 1562 
 1563         if (segsleft >= 2 &&
 1564             cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
 1565                 /*
 1566                  * There are 2 or more segments left to add, and only
 1567                  * enough room for 1 and a chain.  Case (3).
 1568                  *
 1569                  * Mark as last element in this chain if necessary.
 1570                  */
 1571                 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
 1572                         sge->FlagsLength |=
 1573                                 (MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT);
 1574                 }
 1575 
 1576                 /*
 1577                  * Add the item then a chain.  Do the chain now,
 1578                  * rather than on the next iteration, to simplify
 1579                  * understanding the code.
 1580                  */
 1581                 cm->cm_sglsize -= len;
 1582                 bcopy(sgep, cm->cm_sge, len);
 1583                 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
 1584                 return (mps_add_chain(cm));
 1585         }
 1586 
 1587         if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
 1588                 /*
 1589                  * 1 or more segments, enough room for only a chain.
 1590                  * Hope the previous element wasn't a Simple entry
 1591                  * that needed to be marked with
 1592                  * MPI2_SGE_FLAGS_LAST_ELEMENT.  Case (4).
 1593                  */
 1594                 if ((error = mps_add_chain(cm)) != 0)
 1595                         return (error);
 1596         }
 1597 
 1598 #ifdef INVARIANTS
 1599         /* Case 1: 1 more segment, enough room for it. */
 1600         if (segsleft == 1 && cm->cm_sglsize < len)
 1601                 panic("1 seg left and no room? %u versus %zu",
 1602                     cm->cm_sglsize, len);
 1603 
 1604         /* Case 2: 2 more segments, enough room for both */
 1605         if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
 1606                 panic("2 segs left and no room? %u versus %zu",
 1607                     cm->cm_sglsize, len);
 1608 #endif
 1609 
 1610         if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
 1611                 /*
 1612                  * Last element of the last segment of the entire
 1613                  * buffer.
 1614                  */
 1615                 sge->FlagsLength |= ((MPI2_SGE_FLAGS_LAST_ELEMENT |
 1616                     MPI2_SGE_FLAGS_END_OF_BUFFER |
 1617                     MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
 1618         }
 1619 
 1620         cm->cm_sglsize -= len;
 1621         bcopy(sgep, cm->cm_sge, len);
 1622         cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
 1623         return (0);
 1624 }
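
      /*
       * Worked example for the case analysis above (hypothetical sizes,
       * assuming MPS_SGE64_SIZE is 12 bytes and MPS_SGC_SIZE is 8 bytes):
       * with cm_sglsize = 24 and segsleft = 3, a 12-byte simple SGE leaves
       * no room for another SGE plus a chain (24 < 12 + 8 + 12), so this
       * is case 3: the SGE is marked LAST_ELEMENT, copied in, and a chain
       * element is added immediately.  With cm_sglsize = 36 and
       * segsleft = 2, both remaining SGEs fit (36 >= 12 + 12 + 8), so this
       * is case 2 and the SGE is simply copied into place.
       */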
 1625 
 1626 /*
 1627  * Add one dma segment to the scatter-gather list for a command.
 1628  */
 1629 int
 1630 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
 1631     int segsleft)
 1632 {
 1633         MPI2_SGE_SIMPLE64 sge;
 1634 
 1635         /*
 1636          * This driver always uses 64-bit address elements for
 1637          * simplicity.
 1638          */
 1639         flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_ADDRESS_SIZE;
 1640         sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
 1641         mps_from_u64(pa, &sge.Address);
 1642 
 1643         return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
 1644 }
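
      /*
       * Example of the resulting encoding (hypothetical values): for a
       * 4096-byte segment with no extra flags, FlagsLength becomes
       * 4096 | ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
       * MPI2_SGE_FLAGS_ADDRESS_SIZE) << MPI2_SGE_FLAGS_SHIFT), i.e. the
       * segment length in the low bits and the SGE flags shifted into the
       * top of the 32-bit word, as expected by the MPI2 simple SGE format.
       */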
 1645 
 1646 static void
 1647 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1648 {
 1649         struct mps_softc *sc;
 1650         struct mps_command *cm;
 1651         u_int i, dir, sflags;
 1652 
 1653         cm = (struct mps_command *)arg;
 1654         sc = cm->cm_sc;
 1655 
 1656         /*
 1657          * In this case, just print out a warning and let the chip tell the
 1658          * user they did the wrong thing.
 1659          */
 1660         if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
 1661                 mps_printf(sc, "%s: warning: busdma returned %d segments, "
 1662                            "more than the %d allowed\n", __func__, nsegs,
 1663                            cm->cm_max_segs);
 1664         }
 1665 
 1666         /*
 1667          * Set up DMA direction flags.  Note that we don't support
 1668          * bi-directional transfers, with the exception of SMP passthrough.
 1669          */
 1670         sflags = 0;
 1671         if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
 1672                 /*
 1673                  * We have to add a special case for SMP passthrough; there
 1674                  * is no easy way to generically handle it.  The first
 1675                  * S/G element is used for the command (therefore the
 1676                  * direction bit needs to be set).  The second one is used
 1677                  * for the reply.  We'll leave it to the caller to make
 1678                  * sure we only have two buffers.
 1679                  */
 1680                 /*
 1681                  * Even though the busdma man page says it doesn't make
 1682                  * sense to have both direction flags, it does in this case.
 1683                  * We have one s/g element being accessed in each direction.
 1684                  */
 1685                 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
 1686 
 1687                 /*
 1688                  * Set the direction flag on the first buffer in the SMP
 1689                  * passthrough request.  We'll clear it for the second one.
 1690                  */
 1691                 sflags |= MPI2_SGE_FLAGS_DIRECTION |
 1692                           MPI2_SGE_FLAGS_END_OF_BUFFER;
 1693         } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
 1694                 sflags |= MPI2_SGE_FLAGS_DIRECTION;
 1695                 dir = BUS_DMASYNC_PREWRITE;
 1696         } else
 1697                 dir = BUS_DMASYNC_PREREAD;
 1698 
 1699         for (i = 0; i < nsegs; i++) {
 1700                 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS)
 1701                  && (i != 0)) {
 1702                         sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
 1703                 }
 1704                 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
 1705                     sflags, nsegs - i);
 1706                 if (error != 0) {
 1707                         /* Resource shortage, roll back! */
 1708                         mps_dprint(sc, MPS_INFO, "out of chain frames\n");
 1709                         cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
 1710                         mps_complete_command(cm);
 1711                         return;
 1712                 }
 1713         }
 1714 
 1715         bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
 1716         mps_enqueue_request(sc, cm);
 1717 
 1718         return;
 1719 }
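
      /*
       * Example of the flag handling above for a hypothetical two-segment
       * SMP passthrough request: segment 0 (the SMP command buffer) is
       * built with MPI2_SGE_FLAGS_DIRECTION | MPI2_SGE_FLAGS_END_OF_BUFFER
       * set, segment 1 (the response buffer) has DIRECTION cleared, and
       * the map is synced with both BUS_DMASYNC_PREWRITE and
       * BUS_DMASYNC_PREREAD since data moves in both directions.
       */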
 1720 
 1721 static void
 1722 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
 1723              int error)
 1724 {
 1725         mps_data_cb(arg, segs, nsegs, error);
 1726 }
 1727 
 1728 /*
 1729  * Note that the only error path here is from bus_dmamap_load(), which can
 1730  * return EINPROGRESS if it is waiting for resources.
 1731  */
 1732 int
 1733 mps_map_command(struct mps_softc *sc, struct mps_command *cm)
 1734 {
 1735         MPI2_SGE_SIMPLE32 *sge;
 1736         int error = 0;
 1737 
 1738         if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
 1739                 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
 1740                     &cm->cm_uio, mps_data_cb2, cm, 0);
 1741         } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
 1742                 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
 1743                     cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
 1744         } else {
 1745                 /* Add a zero-length element as needed */
 1746                 if (cm->cm_sge != NULL) {
 1747                         sge = (MPI2_SGE_SIMPLE32 *)cm->cm_sge;
 1748                         sge->FlagsLength = (MPI2_SGE_FLAGS_LAST_ELEMENT |
 1749                             MPI2_SGE_FLAGS_END_OF_BUFFER |
 1750                             MPI2_SGE_FLAGS_END_OF_LIST |
 1751                             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
 1752                             MPI2_SGE_FLAGS_SHIFT;
 1753                         sge->Address = 0;
 1754                 }
 1755                 mps_enqueue_request(sc, cm);
 1756         }
 1757 
 1758         return (error);
 1759 }
 1760 
 1761 /*
 1762  * The MPT driver had a verbose interface for config pages.  In this driver,
 1763  * it is reduced to much simpler terms, similar to the Linux driver.
 1764  */
 1765 int
 1766 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
 1767 {
 1768         MPI2_CONFIG_REQUEST *req;
 1769         struct mps_command *cm;
 1770         int error;
 1771 
 1772         if (sc->mps_flags & MPS_FLAGS_BUSY) {
 1773                 return (EBUSY);
 1774         }
 1775 
 1776         cm = mps_alloc_command(sc);
 1777         if (cm == NULL) {
 1778                 return (EBUSY);
 1779         }
 1780 
 1781         req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
 1782         req->Function = MPI2_FUNCTION_CONFIG;
 1783         req->Action = params->action;
 1784         req->SGLFlags = 0;
 1785         req->ChainOffset = 0;
 1786         req->PageAddress = params->page_address;
 1787         if (params->hdr.Ext.ExtPageType != 0) {
 1788                 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
 1789 
 1790                 hdr = &params->hdr.Ext;
 1791                 req->ExtPageType = hdr->ExtPageType;
 1792                 req->ExtPageLength = hdr->ExtPageLength;
 1793                 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
 1794                 req->Header.PageLength = 0; /* Must be set to zero */
 1795                 req->Header.PageNumber = hdr->PageNumber;
 1796                 req->Header.PageVersion = hdr->PageVersion;
 1797         } else {
 1798                 MPI2_CONFIG_PAGE_HEADER *hdr;
 1799 
 1800                 hdr = &params->hdr.Struct;
 1801                 req->Header.PageType = hdr->PageType;
 1802                 req->Header.PageNumber = hdr->PageNumber;
 1803                 req->Header.PageLength = hdr->PageLength;
 1804                 req->Header.PageVersion = hdr->PageVersion;
 1805         }
 1806 
 1807         cm->cm_data = params->buffer;
 1808         cm->cm_length = params->length;
 1809         cm->cm_sge = &req->PageBufferSGE;
 1810         cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
 1811         cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
 1812         cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 1813 
 1814         cm->cm_complete_data = params;
 1815         if (params->callback != NULL) {
 1816                 cm->cm_complete = mps_config_complete;
 1817                 return (mps_map_command(sc, cm));
 1818         } else {
 1819                 cm->cm_complete = NULL;
 1820                 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
 1821                 if ((error = mps_map_command(sc, cm)) != 0)
 1822                         return (error);
 1823                 msleep(cm, &sc->mps_mtx, 0, "mpswait", 0);
 1824                 mps_config_complete(sc, cm);
 1825         }
 1826 
 1827         return (0);
 1828 }
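
      /*
       * Illustrative sketch of a synchronous config page read through this
       * interface (hypothetical variable names; constants taken from the
       * MPI2 headers; not part of the original driver).  With callback
       * left NULL the call sleeps on the mps mutex until the page arrives,
       * so the caller is presumably expected to hold the mps lock, and a
       * real caller would normally fetch the page header first to learn
       * the page length and version:
       *
       *      struct mps_config_params params;
       *      MPI2_CONFIG_PAGE_MAN_0 page;
       *
       *      bzero(&params, sizeof(params));
       *      params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
       *      params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
       *      params.hdr.Struct.PageNumber = 0;
       *      params.page_address = 0;
       *      params.buffer = &page;
       *      params.length = sizeof(page);
       *      params.callback = NULL;
       *      if (mps_read_config_page(sc, &params) == 0 &&
       *          (params.status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS)
       *              ... page contents are valid ...
       */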
 1829 
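      /*
       * Config page writes are not implemented in this version of the
       * driver; the call below always fails with EINVAL.
       */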
 1830 int
 1831 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
 1832 {
 1833         return (EINVAL);
 1834 }
 1835 
 1836 static void
 1837 mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
 1838 {
 1839         MPI2_CONFIG_REPLY *reply;
 1840         struct mps_config_params *params;
 1841 
 1842         params = cm->cm_complete_data;
 1843 
 1844         if (cm->cm_data != NULL) {
 1845                 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
 1846                     BUS_DMASYNC_POSTREAD);
 1847                 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
 1848         }
 1849 
 1850         /*
 1851          * XXX KDM need to do more error recovery?  This results in the
 1852          * device in question not getting probed.
 1853          */
 1854         if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
 1855                 params->status = MPI2_IOCSTATUS_BUSY;
 1856                 goto bailout;
 1857         }
 1858 
 1859         reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
 1860         params->status = reply->IOCStatus;
 1861         if (params->hdr.Ext.ExtPageType != 0) {
 1862                 params->hdr.Ext.ExtPageType = reply->ExtPageType;
 1863                 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
 1864         } else {
 1865                 params->hdr.Struct.PageType = reply->Header.PageType;
 1866                 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
 1867                 params->hdr.Struct.PageLength = reply->Header.PageLength;
 1868                 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
 1869         }
 1870 
 1871 bailout:
 1872 
 1873         mps_free_command(sc, cm);
 1874         if (params->callback != NULL)
 1875                 params->callback(sc, params);
 1876 
 1877         return;
 1878 }
