FreeBSD/Linux Kernel Cross Reference
sys/dev/mpt/mpt.c

    1 /*-
    2  * Generic routines for LSI Fusion adapters.
    3  * FreeBSD Version.
    4  *
    5  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause
    6  *
    7  * Copyright (c) 2000, 2001 by Greg Ansley
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice immediately at the beginning of the file, without modification,
   14  *    this list of conditions, and the following disclaimer.
   15  * 2. The name of the author may not be used to endorse or promote products
   16  *    derived from this software without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 /*-
   31  * Copyright (c) 2002, 2006 by Matthew Jacob
   32  * All rights reserved.
   33  *
   34  * Redistribution and use in source and binary forms, with or without
   35  * modification, are permitted provided that the following conditions are
   36  * met:
   37  * 1. Redistributions of source code must retain the above copyright
   38  *    notice, this list of conditions and the following disclaimer.
   39  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   40  *    substantially similar to the "NO WARRANTY" disclaimer below
   41  *    ("Disclaimer") and any redistribution must be conditioned upon including
   42  *    a substantially similar Disclaimer requirement for further binary
   43  *    redistribution.
   44  * 3. Neither the names of the above listed copyright holders nor the names
   45  *    of any contributors may be used to endorse or promote products derived
   46  *    from this software without specific prior written permission.
   47  *
   48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   49  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   51  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   52  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   53  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   54  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   55  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   56  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   57  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   58  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   59  *
   60  * Support from Chris Ellsworth in order to make SAS adapters work
   61  * is gratefully acknowledged.
   62  *
   63  *
   64  * Support from LSI-Logic has also gone a great deal toward making this a
   65  * workable subsystem and is gratefully acknowledged.
   66  */
   67 /*-
   68  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
   69  * Copyright (c) 2005, WHEEL Sp. z o.o.
   70  * Copyright (c) 2004, 2005 Justin T. Gibbs
   71  * All rights reserved.
   72  *
   73  * Redistribution and use in source and binary forms, with or without
   74  * modification, are permitted provided that the following conditions are
   75  * met:
   76  * 1. Redistributions of source code must retain the above copyright
   77  *    notice, this list of conditions and the following disclaimer.
   78  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   79  *    substantially similar to the "NO WARRANTY" disclaimer below
   80  *    ("Disclaimer") and any redistribution must be conditioned upon including
   81  *    a substantially similar Disclaimer requirement for further binary
   82  *    redistribution.
   83  * 3. Neither the names of the above listed copyright holders nor the names
   84  *    of any contributors may be used to endorse or promote products derived
   85  *    from this software without specific prior written permission.
   86  *
   87  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   88  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   89  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   90  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   91  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   92  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   93  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   94  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   95  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   96  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   97  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   98  */
   99 
  100 #include <sys/cdefs.h>
  101 __FBSDID("$FreeBSD$");
  102 
  103 #include <dev/mpt/mpt.h>
  104 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
  105 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
  106 
  107 #include <dev/mpt/mpilib/mpi.h>
  108 #include <dev/mpt/mpilib/mpi_ioc.h>
  109 #include <dev/mpt/mpilib/mpi_fc.h>
  110 #include <dev/mpt/mpilib/mpi_targ.h>
  111 
  112 #include <sys/sysctl.h>
  113 
  114 #define MPT_MAX_TRYS 3
  115 #define MPT_MAX_WAIT 300000
  116 
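/*
 * MPT_MAX_WAIT bounds the polled doorbell loops below; the maxwait_*
 * variables merely record the largest iteration count ever observed by
 * mpt_wait_db_ack(), mpt_wait_db_int() and mpt_wait_state().
 */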
  117 static int maxwait_ack = 0;
  118 static int maxwait_int = 0;
  119 static int maxwait_state = 0;
  120 
  121 static TAILQ_HEAD(, mpt_softc)  mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
  122 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
  123 
  124 static mpt_reply_handler_t mpt_default_reply_handler;
  125 static mpt_reply_handler_t mpt_config_reply_handler;
  126 static mpt_reply_handler_t mpt_handshake_reply_handler;
  127 static mpt_reply_handler_t mpt_event_reply_handler;
  128 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  129                                MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
  130 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
  131 static int mpt_soft_reset(struct mpt_softc *mpt);
  132 static void mpt_hard_reset(struct mpt_softc *mpt);
  133 static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
  134 static void mpt_dma_buf_free(struct mpt_softc *mpt);
  135 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
  136 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
  137 
  138 /************************* Personality Module Support *************************/
  139 /*
  140  * We include one extra entry that is guaranteed to be NULL
   141  * to simplify our iterator.
  142  */
  143 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
  144 static __inline struct mpt_personality*
  145         mpt_pers_find(struct mpt_softc *, u_int);
  146 static __inline struct mpt_personality*
  147         mpt_pers_find_reverse(struct mpt_softc *, u_int);
  148 
  149 static __inline struct mpt_personality *
  150 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
  151 {
  152         KASSERT(start_at <= MPT_MAX_PERSONALITIES,
  153                 ("mpt_pers_find: starting position out of range"));
  154 
  155         while (start_at < MPT_MAX_PERSONALITIES
  156             && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
  157                 start_at++;
  158         }
  159         return (mpt_personalities[start_at]);
  160 }
  161 
   162 /*
   163  * Used infrequently, so there is no need to optimize this like the
   164  * forward traversal's "MAX+1 entry is guaranteed to be NULL" trick.
   165  * (u_int wraparound below index 0 terminates the loop here.)
   166  */
  167 static __inline struct mpt_personality *
  168 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
  169 {
  170         while (start_at < MPT_MAX_PERSONALITIES
  171             && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
  172                 start_at--;
  173         }
  174         if (start_at < MPT_MAX_PERSONALITIES)
  175                 return (mpt_personalities[start_at]);
  176         return (NULL);
  177 }
  178 
  179 #define MPT_PERS_FOREACH(mpt, pers)                             \
  180         for (pers = mpt_pers_find(mpt, /*start_at*/0);          \
  181              pers != NULL;                                      \
  182              pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
  183 
  184 #define MPT_PERS_FOREACH_REVERSE(mpt, pers)                             \
  185         for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
  186              pers != NULL;                                              \
  187              pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
  188 
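The forward iterator above leans on the sentinel slot: mpt_personalities
has MPT_MAX_PERSONALITIES + 1 entries and the extra one is never written,
so mpt_pers_find() can return mpt_personalities[start_at] unconditionally
and MPT_PERS_FOREACH terminates on NULL. A minimal, self-contained sketch
of the same sentinel trick (names are illustrative, not driver code):

    #include <stdio.h>

    #define MAX_ENTRIES 4

    /* One extra slot stays NULL so the walk needs no explicit bound. */
    static const char *table[MAX_ENTRIES + 1] = { "core", "cam", "raid" };

    int
    main(void)
    {
            const char **p;

            for (p = table; *p != NULL; p++)    /* stops at the sentinel */
                    printf("%s\n", *p);
            return (0);
    }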
  189 static mpt_load_handler_t      mpt_stdload;
  190 static mpt_probe_handler_t     mpt_stdprobe;
  191 static mpt_attach_handler_t    mpt_stdattach;
  192 static mpt_enable_handler_t    mpt_stdenable;
  193 static mpt_ready_handler_t     mpt_stdready;
  194 static mpt_event_handler_t     mpt_stdevent;
  195 static mpt_reset_handler_t     mpt_stdreset;
  196 static mpt_shutdown_handler_t  mpt_stdshutdown;
  197 static mpt_detach_handler_t    mpt_stddetach;
  198 static mpt_unload_handler_t    mpt_stdunload;
  199 static struct mpt_personality mpt_default_personality =
  200 {
  201         .load           = mpt_stdload,
  202         .probe          = mpt_stdprobe,
  203         .attach         = mpt_stdattach,
  204         .enable         = mpt_stdenable,
  205         .ready          = mpt_stdready,
  206         .event          = mpt_stdevent,
  207         .reset          = mpt_stdreset,
  208         .shutdown       = mpt_stdshutdown,
  209         .detach         = mpt_stddetach,
  210         .unload         = mpt_stdunload
  211 };
  212 
  213 static mpt_load_handler_t      mpt_core_load;
  214 static mpt_attach_handler_t    mpt_core_attach;
  215 static mpt_enable_handler_t    mpt_core_enable;
  216 static mpt_reset_handler_t     mpt_core_ioc_reset;
  217 static mpt_event_handler_t     mpt_core_event;
  218 static mpt_shutdown_handler_t  mpt_core_shutdown;
  219 static mpt_shutdown_handler_t  mpt_core_detach;
  220 static mpt_unload_handler_t    mpt_core_unload;
  221 static struct mpt_personality mpt_core_personality =
  222 {
  223         .name           = "mpt_core",
  224         .load           = mpt_core_load,
  225 //      .attach         = mpt_core_attach,
  226 //      .enable         = mpt_core_enable,
  227         .event          = mpt_core_event,
  228         .reset          = mpt_core_ioc_reset,
  229         .shutdown       = mpt_core_shutdown,
  230         .detach         = mpt_core_detach,
  231         .unload         = mpt_core_unload,
  232 };
  233 
  234 /*
  235  * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
   236  * ordering information.  We want the core to always register FIRST;
   237  * other modules are set to SI_ORDER_SECOND.
  238  */
  239 static moduledata_t mpt_core_mod = {
  240         "mpt_core", mpt_modevent, &mpt_core_personality
  241 };
  242 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
  243 MODULE_VERSION(mpt_core, 1);
  244 
  245 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
  246 
  247 int
  248 mpt_modevent(module_t mod, int type, void *data)
  249 {
  250         struct mpt_personality *pers;
  251         int error;
  252 
  253         pers = (struct mpt_personality *)data;
  254 
  255         error = 0;
  256         switch (type) {
  257         case MOD_LOAD:
  258         {
  259                 mpt_load_handler_t **def_handler;
  260                 mpt_load_handler_t **pers_handler;
  261                 int i;
  262 
  263                 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
  264                         if (mpt_personalities[i] == NULL)
  265                                 break;
  266                 }
  267                 if (i >= MPT_MAX_PERSONALITIES) {
  268                         error = ENOMEM;
  269                         break;
  270                 }
  271                 pers->id = i;
  272                 mpt_personalities[i] = pers;
  273 
  274                 /* Install standard/noop handlers for any NULL entries. */
  275                 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
  276                 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
  277                 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
  278                         if (*pers_handler == NULL)
  279                                 *pers_handler = *def_handler;
  280                         pers_handler++;
  281                         def_handler++;
  282                 }
  283                 
  284                 error = (pers->load(pers));
  285                 if (error != 0)
  286                         mpt_personalities[i] = NULL;
  287                 break;
  288         }
  289         case MOD_SHUTDOWN:
  290                 break;
  291         case MOD_QUIESCE:
  292                 break;
  293         case MOD_UNLOAD:
  294                 error = pers->unload(pers);
  295                 mpt_personalities[pers->id] = NULL;
  296                 break;
  297         default:
  298                 error = EINVAL;
  299                 break;
  300         }
  301         return (error);
  302 }
  303 
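The MOD_LOAD case above back-fills every handler the new personality left
NULL with the matching no-op from mpt_default_personality, walking both
structures as flat tables of function pointers bracketed by
MPT_PERS_FIRST_HANDLER() and MPT_PERS_LAST_HANDLER(). A sketch of that
defaulting pattern with illustrative types (the real macros and struct
layout live in mpt.h):

    typedef int (*handler_fn)(void);

    struct ops {
            handler_fn      open;           /* first handler slot */
            handler_fn      close;          /* last handler slot */
    };

    static int nop(void) { return (0); }
    static struct ops op_defaults = { .open = nop, .close = nop };

    static void
    fill_null_slots(struct ops *ops)
    {
            handler_fn *dst = &ops->open;
            handler_fn *def = &op_defaults.open;

            /* Walks the struct as a contiguous pointer array, as the
               driver does; assumes adjacent members with no padding. */
            while (dst <= &ops->close) {
                    if (*dst == NULL)
                            *dst = *def;
                    dst++;
                    def++;
            }
    }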
  304 static int
  305 mpt_stdload(struct mpt_personality *pers)
  306 {
  307 
  308         /* Load is always successful. */
  309         return (0);
  310 }
  311 
  312 static int
  313 mpt_stdprobe(struct mpt_softc *mpt)
  314 {
  315 
  316         /* Probe is always successful. */
  317         return (0);
  318 }
  319 
  320 static int
  321 mpt_stdattach(struct mpt_softc *mpt)
  322 {
  323 
  324         /* Attach is always successful. */
  325         return (0);
  326 }
  327 
  328 static int
  329 mpt_stdenable(struct mpt_softc *mpt)
  330 {
  331 
  332         /* Enable is always successful. */
  333         return (0);
  334 }
  335 
  336 static void
  337 mpt_stdready(struct mpt_softc *mpt)
  338 {
  339 
  340 }
  341 
  342 static int
  343 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
  344 {
  345 
  346         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
  347         /* Event was not for us. */
  348         return (0);
  349 }
  350 
  351 static void
  352 mpt_stdreset(struct mpt_softc *mpt, int type)
  353 {
  354 
  355 }
  356 
  357 static void
  358 mpt_stdshutdown(struct mpt_softc *mpt)
  359 {
  360 
  361 }
  362 
  363 static void
  364 mpt_stddetach(struct mpt_softc *mpt)
  365 {
  366 
  367 }
  368 
  369 static int
  370 mpt_stdunload(struct mpt_personality *pers)
  371 {
  372 
  373         /* Unload is always successful. */
  374         return (0);
  375 }
  376 
  377 /*
  378  * Post driver attachment, we may want to perform some global actions.
  379  * Here is the hook to do so.
  380  */
  381 
  382 static void
  383 mpt_postattach(void *unused)
  384 {
  385         struct mpt_softc *mpt;
  386         struct mpt_personality *pers;
  387 
  388         TAILQ_FOREACH(mpt, &mpt_tailq, links) {
  389                 MPT_PERS_FOREACH(mpt, pers)
  390                         pers->ready(mpt);
  391         }
  392 }
  393 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
  394 
  395 /******************************* Bus DMA Support ******************************/
  396 void
  397 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  398 {
  399         struct mpt_map_info *map_info;
  400 
  401         map_info = (struct mpt_map_info *)arg;
  402         map_info->error = error;
  403         map_info->phys = segs->ds_addr;
  404 }
  405 
  406 /**************************** Reply/Event Handling ****************************/
  407 int
  408 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
  409                      mpt_handler_t handler, uint32_t *phandler_id)
  410 {
  411 
  412         switch (type) {
  413         case MPT_HANDLER_REPLY:
  414         {
  415                 u_int cbi;
  416                 u_int free_cbi;
  417 
  418                 if (phandler_id == NULL)
  419                         return (EINVAL);
  420 
  421                 free_cbi = MPT_HANDLER_ID_NONE;
  422                 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
  423                         /*
  424                          * If the same handler is registered multiple
  425                          * times, don't error out.  Just return the
  426                          * index of the original registration.
  427                          */
  428                         if (mpt_reply_handlers[cbi] == handler.reply_handler) {
  429                                 *phandler_id = MPT_CBI_TO_HID(cbi);
  430                                 return (0);
  431                         }
  432 
  433                         /*
  434                          * Fill from the front in the hope that
  435                          * all registered handlers consume only a
  436                          * single cache line.
  437                          *
  438                          * We don't break on the first empty slot so
  439                          * that the full table is checked to see if
  440                          * this handler was previously registered.
  441                          */
  442                         if (free_cbi == MPT_HANDLER_ID_NONE &&
  443                             (mpt_reply_handlers[cbi]
  444                           == mpt_default_reply_handler))
  445                                 free_cbi = cbi;
  446                 }
  447                 if (free_cbi == MPT_HANDLER_ID_NONE) {
  448                         return (ENOMEM);
  449                 }
  450                 mpt_reply_handlers[free_cbi] = handler.reply_handler;
  451                 *phandler_id = MPT_CBI_TO_HID(free_cbi);
  452                 break;
  453         }
  454         default:
  455                 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
  456                 return (EINVAL);
  457         }
  458         return (0);
  459 }
  460 
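For reference, a hypothetical caller registers a reply handler as below
(my_reply_handler and my_handler_id are illustrative; mpt_handler_t is
the union whose .reply_handler member the code above consults). Since a
duplicate registration just returns the original index, the call is
idempotent:

    static mpt_reply_handler_t my_reply_handler;    /* defined elsewhere */
    static uint32_t my_handler_id = MPT_HANDLER_ID_NONE;

    static int
    my_register(struct mpt_softc *mpt)
    {
            mpt_handler_t handler;

            handler.reply_handler = my_reply_handler;
            return (mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
                &my_handler_id));
    }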
  461 int
  462 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
  463                        mpt_handler_t handler, uint32_t handler_id)
  464 {
  465 
  466         switch (type) {
  467         case MPT_HANDLER_REPLY:
  468         {
  469                 u_int cbi;
  470 
  471                 cbi = MPT_CBI(handler_id);
  472                 if (cbi >= MPT_NUM_REPLY_HANDLERS
  473                  || mpt_reply_handlers[cbi] != handler.reply_handler)
  474                         return (ENOENT);
  475                 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
  476                 break;
  477         }
  478         default:
  479                 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
  480                 return (EINVAL);
  481         }
  482         return (0);
  483 }
  484 
  485 static int
  486 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
  487         uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  488 {
  489 
  490         mpt_prt(mpt,
  491             "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
  492             req, req->serno, reply_desc, reply_frame);
  493 
  494         if (reply_frame != NULL)
  495                 mpt_dump_reply_frame(mpt, reply_frame);
  496 
  497         mpt_prt(mpt, "Reply Frame Ignored\n");
  498 
  499         return (/*free_reply*/TRUE);
  500 }
  501 
  502 static int
  503 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
  504  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  505 {
  506 
  507         if (req != NULL) {
  508                 if (reply_frame != NULL) {
  509                         MSG_CONFIG *cfgp;
  510                         MSG_CONFIG_REPLY *reply;
  511 
  512                         cfgp = (MSG_CONFIG *)req->req_vbuf;
  513                         reply = (MSG_CONFIG_REPLY *)reply_frame;
  514                         req->IOCStatus = le16toh(reply_frame->IOCStatus);
  515                         bcopy(&reply->Header, &cfgp->Header,
  516                               sizeof(cfgp->Header));
  517                         cfgp->ExtPageLength = reply->ExtPageLength;
  518                         cfgp->ExtPageType = reply->ExtPageType;
  519                 }
  520                 req->state &= ~REQ_STATE_QUEUED;
  521                 req->state |= REQ_STATE_DONE;
  522                 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
  523                 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
  524                         wakeup(req);
  525                 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
  526                         /*
  527                          * Whew- we can free this request (late completion)
  528                          */
  529                         mpt_free_request(mpt, req);
  530                 }
  531         }
  532 
  533         return (TRUE);
  534 }
  535 
  536 static int
  537 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
  538  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  539 {
  540 
  541         /* Nothing to be done. */
  542         return (TRUE);
  543 }
  544 
  545 static int
  546 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
  547     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  548 {
  549         int free_reply;
  550 
  551         KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
  552         KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
  553 
  554         free_reply = TRUE;
  555         switch (reply_frame->Function) {
  556         case MPI_FUNCTION_EVENT_NOTIFICATION:
  557         {
  558                 MSG_EVENT_NOTIFY_REPLY *msg;
  559                 struct mpt_personality *pers;
  560                 u_int handled;
  561 
  562                 handled = 0;
  563                 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  564                 msg->EventDataLength = le16toh(msg->EventDataLength);
  565                 msg->IOCStatus = le16toh(msg->IOCStatus);
  566                 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
  567                 msg->Event = le32toh(msg->Event);
  568                 MPT_PERS_FOREACH(mpt, pers)
  569                         handled += pers->event(mpt, req, msg);
  570 
  571                 if (handled == 0 && mpt->mpt_pers_mask == 0) {
  572                         mpt_lprt(mpt, MPT_PRT_INFO,
  573                                 "No Handlers For Any Event Notify Frames. "
  574                                 "Event %#x (ACK %sequired).\n",
  575                                 msg->Event, msg->AckRequired? "r" : "not r");
  576                 } else if (handled == 0) {
  577                         mpt_lprt(mpt,
  578                                 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
  579                                 "Unhandled Event Notify Frame. Event %#x "
  580                                 "(ACK %sequired).\n",
  581                                 msg->Event, msg->AckRequired? "r" : "not r");
  582                 }
  583 
  584                 if (msg->AckRequired) {
  585                         request_t *ack_req;
  586                         uint32_t context;
  587 
  588                         context = req->index | MPT_REPLY_HANDLER_EVENTS;
  589                         ack_req = mpt_get_request(mpt, FALSE);
  590                         if (ack_req == NULL) {
  591                                 struct mpt_evtf_record *evtf;
  592 
  593                                 evtf = (struct mpt_evtf_record *)reply_frame;
  594                                 evtf->context = context;
  595                                 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
  596                                 free_reply = FALSE;
  597                                 break;
  598                         }
  599                         mpt_send_event_ack(mpt, ack_req, msg, context);
  600                         /*
  601                          * Don't check for CONTINUATION_REPLY here
  602                          */
  603                         return (free_reply);
  604                 }
  605                 break;
  606         }
  607         case MPI_FUNCTION_PORT_ENABLE:
  608                 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
  609                 break;
  610         case MPI_FUNCTION_EVENT_ACK:
  611                 break;
  612         default:
  613                 mpt_prt(mpt, "unknown event function: %x\n",
  614                         reply_frame->Function);
  615                 break;
  616         }
  617 
  618         /*
  619          * I'm not sure that this continuation stuff works as it should.
  620          *
  621          * I've had FC async events occur that free the frame up because
  622          * the continuation bit isn't set, and then additional async events
   623  * occur using the same context. As you might imagine, this
   624  * leads to Very Bad Things.
  625          *
   626  * Let's just be safe for now and not free them up until we figure
  627          * out what's actually happening here.
  628          */
  629 #if     0
  630         if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
  631                 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
  632                 mpt_free_request(mpt, req);
  633                 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
  634                     reply_frame->Function, req, req->serno);
  635                 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
  636                         MSG_EVENT_NOTIFY_REPLY *msg =
  637                             (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  638                         mpt_prtc(mpt, " Event=0x%x AckReq=%d",
  639                             msg->Event, msg->AckRequired);
  640                 }
  641         } else {
  642                 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
  643                     reply_frame->Function, req, req->serno);
  644                 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
  645                         MSG_EVENT_NOTIFY_REPLY *msg =
  646                             (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  647                         mpt_prtc(mpt, " Event=0x%x AckReq=%d",
  648                             msg->Event, msg->AckRequired);
  649                 }
  650                 mpt_prtc(mpt, "\n");
  651         }
  652 #endif
  653         return (free_reply);
  654 }
  655 
  656 /*
  657  * Process an asynchronous event from the IOC.
  658  */
  659 static int
  660 mpt_core_event(struct mpt_softc *mpt, request_t *req,
  661                MSG_EVENT_NOTIFY_REPLY *msg)
  662 {
  663 
  664         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
  665                  msg->Event & 0xFF);
  666         switch(msg->Event & 0xFF) {
  667         case MPI_EVENT_NONE:
  668                 break;
  669         case MPI_EVENT_LOG_DATA:
  670         {
  671                 int i;
  672 
  673                 /* Some error occurred that LSI wants logged */
  674                 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
  675                         msg->IOCLogInfo);
  676                 mpt_prt(mpt, "\tEvtLogData: Event Data:");
  677                 for (i = 0; i < msg->EventDataLength; i++)
  678                         mpt_prtc(mpt, "  %08x", msg->Data[i]);
  679                 mpt_prtc(mpt, "\n");
  680                 break;
  681         }
  682         case MPI_EVENT_EVENT_CHANGE:
  683                 /*
  684                  * This is just an acknowledgement
  685                  * of our mpt_send_event_request.
  686                  */
  687                 break;
  688         case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
  689                 break;
  690         default:
  691                 return (0);
  692                 break;
  693         }
  694         return (1);
  695 }
  696 
  697 static void
  698 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  699                    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
  700 {
  701         MSG_EVENT_ACK *ackp;
  702 
  703         ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
  704         memset(ackp, 0, sizeof (*ackp));
  705         ackp->Function = MPI_FUNCTION_EVENT_ACK;
  706         ackp->Event = htole32(msg->Event);
  707         ackp->EventContext = htole32(msg->EventContext);
  708         ackp->MsgContext = htole32(context);
  709         mpt_check_doorbell(mpt);
  710         mpt_send_cmd(mpt, ack_req);
  711 }
  712 
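When mpt_event_reply_handler() cannot allocate a request for a required
ACK, it parks the reply frame on mpt->ack_frames instead of freeing it;
mpt_free_request() later drains one deferred frame whenever a request is
returned. A self-contained sketch of that defer-and-drain pattern (struct
names are illustrative; the driver uses struct mpt_evtf_record):

    #include <sys/queue.h>
    #include <stdint.h>
    #include <stddef.h>

    struct evt_ack {
            LIST_ENTRY(evt_ack)     links;
            uint32_t                context;
    };
    static LIST_HEAD(, evt_ack) ack_list = LIST_HEAD_INITIALIZER(ack_list);

    /* Producer (cf. the ack_req == NULL path above): park the work. */
    static void
    defer_ack(struct evt_ack *evtf, uint32_t context)
    {
            evtf->context = context;
            LIST_INSERT_HEAD(&ack_list, evtf, links);
    }

    /* Consumer (cf. mpt_free_request()): drain one deferred ACK. */
    static struct evt_ack *
    pop_deferred_ack(void)
    {
            struct evt_ack *evtf;

            if ((evtf = LIST_FIRST(&ack_list)) != NULL)
                    LIST_REMOVE(evtf, links);
            return (evtf);
    }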
  713 /***************************** Interrupt Handling *****************************/
  714 void
  715 mpt_intr(void *arg)
  716 {
  717         struct mpt_softc *mpt;
  718         uint32_t reply_desc;
  719         int ntrips = 0;
  720 
  721         mpt = (struct mpt_softc *)arg;
  722         mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
  723         MPT_LOCK_ASSERT(mpt);
  724 
  725         while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
  726                 request_t         *req;
  727                 MSG_DEFAULT_REPLY *reply_frame;
  728                 uint32_t           reply_baddr;
  729                 uint32_t           ctxt_idx;
  730                 u_int              cb_index;
  731                 u_int              req_index;
  732                 u_int              offset;
  733                 int                free_rf;
  734 
  735                 req = NULL;
  736                 reply_frame = NULL;
  737                 reply_baddr = 0;
  738                 offset = 0;
  739                 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
  740                         /*
  741                          * Ensure that the reply frame is coherent.
  742                          */
  743                         reply_baddr = MPT_REPLY_BADDR(reply_desc);
  744                         offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
  745                         bus_dmamap_sync_range(mpt->reply_dmat,
  746                             mpt->reply_dmap, offset, MPT_REPLY_SIZE,
  747                             BUS_DMASYNC_POSTREAD);
  748                         reply_frame = MPT_REPLY_OTOV(mpt, offset);
  749                         ctxt_idx = le32toh(reply_frame->MsgContext);
  750                 } else {
  751                         uint32_t type;
  752 
  753                         type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
  754                         ctxt_idx = reply_desc;
  755                         mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
  756                                     reply_desc);
  757 
  758                         switch (type) {
  759                         case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
  760                                 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
  761                                 break;
  762                         case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
  763                                 ctxt_idx = GET_IO_INDEX(reply_desc);
  764                                 if (mpt->tgt_cmd_ptrs == NULL) {
  765                                         mpt_prt(mpt,
  766                                             "mpt_intr: no target cmd ptrs\n");
  767                                         reply_desc = MPT_REPLY_EMPTY;
  768                                         break;
  769                                 }
  770                                 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
  771                                         mpt_prt(mpt,
  772                                             "mpt_intr: bad tgt cmd ctxt %u\n",
  773                                             ctxt_idx);
  774                                         reply_desc = MPT_REPLY_EMPTY;
  775                                         ntrips = 1000;
  776                                         break;
  777                                 }
  778                                 req = mpt->tgt_cmd_ptrs[ctxt_idx];
  779                                 if (req == NULL) {
  780                                         mpt_prt(mpt, "no request backpointer "
  781                                             "at index %u", ctxt_idx);
  782                                         reply_desc = MPT_REPLY_EMPTY;
  783                                         ntrips = 1000;
  784                                         break;
  785                                 }
  786                                 /*
  787                                  * Reformulate ctxt_idx to be just as if
  788                                  * it were another type of context reply
  789                                  * so the code below will find the request
  790                                  * via indexing into the pool.
  791                                  */
  792                                 ctxt_idx =
  793                                     req->index | mpt->scsi_tgt_handler_id;
  794                                 req = NULL;
  795                                 break;
  796                         case MPI_CONTEXT_REPLY_TYPE_LAN:
  797                                 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
  798                                     reply_desc);
  799                                 reply_desc = MPT_REPLY_EMPTY;
  800                                 break;
  801                         default:
  802                                 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
  803                                 reply_desc = MPT_REPLY_EMPTY;
  804                                 break;
  805                         }
  806                         if (reply_desc == MPT_REPLY_EMPTY) {
  807                                 if (ntrips++ > 1000) {
  808                                         break;
  809                                 }
  810                                 continue;
  811                         }
  812                 }
  813 
  814                 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
  815                 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
  816                 if (req_index < MPT_MAX_REQUESTS(mpt)) {
  817                         req = &mpt->request_pool[req_index];
  818                 } else {
  819                         mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
  820                             " 0x%x)\n", req_index, reply_desc);
  821                 }
  822 
  823                 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
  824                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  825                 free_rf = mpt_reply_handlers[cb_index](mpt, req,
  826                     reply_desc, reply_frame);
  827 
  828                 if (reply_frame != NULL && free_rf) {
  829                         bus_dmamap_sync_range(mpt->reply_dmat,
  830                             mpt->reply_dmap, offset, MPT_REPLY_SIZE,
  831                             BUS_DMASYNC_PREREAD);
  832                         mpt_free_reply(mpt, reply_baddr);
  833                 }
  834 
  835                 /*
  836                  * If we got ourselves disabled, don't get stuck in a loop
  837                  */
  838                 if (mpt->disabled) {
  839                         mpt_disable_ints(mpt);
  840                         break;
  841                 }
  842                 if (ntrips++ > 1000) {
  843                         break;
  844                 }
  845         }
  846         mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
  847 }
  848 
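A context reply packs both the callback index and the request index into
one 32-bit word; mpt_intr() splits them with MPT_CONTEXT_TO_CBI() and
MPT_CONTEXT_TO_REQI() from mpt.h. A sketch of the decode, assuming the
16/16 split those macros imply (treat the exact widths as illustrative):

    static void
    decode_context(uint32_t ctxt, u_int *cb_index, u_int *req_index)
    {
            /* Assumed layout: high half selects the reply handler,
               low half indexes the request pool. */
            *cb_index = (ctxt >> 16) & 0xFFFF;
            *req_index = ctxt & 0xFFFF;
    }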
  849 /******************************* Error Recovery *******************************/
  850 void
  851 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
  852                             u_int iocstatus)
  853 {
  854         MSG_DEFAULT_REPLY  ioc_status_frame;
  855         request_t         *req;
  856 
  857         memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
  858         ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
  859         ioc_status_frame.IOCStatus = iocstatus;
  860         while((req = TAILQ_FIRST(chain)) != NULL) {
  861                 MSG_REQUEST_HEADER *msg_hdr;
  862                 u_int               cb_index;
  863 
  864                 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
  865                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  866                 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
  867                 ioc_status_frame.Function = msg_hdr->Function;
  868                 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
  869                 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
  870                 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
  871                     &ioc_status_frame);
  872                 if (mpt_req_on_pending_list(mpt, req) != 0)
  873                         TAILQ_REMOVE(chain, req, links);
  874         }
  875 }
  876 
  877 /********************************* Diagnostics ********************************/
  878 /*
  879  * Perform a diagnostic dump of a reply frame.
  880  */
  881 void
  882 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
  883 {
  884 
  885         mpt_prt(mpt, "Address Reply:\n");
  886         mpt_print_reply(reply_frame);
  887 }
  888 
  889 /******************************* Doorbell Access ******************************/
  890 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
  891 static __inline  uint32_t mpt_rd_intr(struct mpt_softc *mpt);
  892 
  893 static __inline uint32_t
  894 mpt_rd_db(struct mpt_softc *mpt)
  895 {
  896 
  897         return mpt_read(mpt, MPT_OFFSET_DOORBELL);
  898 }
  899 
  900 static __inline uint32_t
  901 mpt_rd_intr(struct mpt_softc *mpt)
  902 {
  903 
  904         return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
  905 }
  906 
   907 /* Busy wait for a doorbell to be read by the IOC */
  908 static int
  909 mpt_wait_db_ack(struct mpt_softc *mpt)
  910 {
  911         int i;
  912 
  913         for (i=0; i < MPT_MAX_WAIT; i++) {
  914                 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
  915                         maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
  916                         return (MPT_OK);
  917                 }
  918                 DELAY(200);
  919         }
  920         return (MPT_FAIL);
  921 }
  922 
   923 /* Busy wait for a doorbell interrupt */
  924 static int
  925 mpt_wait_db_int(struct mpt_softc *mpt)
  926 {
  927         int i;
  928 
  929         for (i = 0; i < MPT_MAX_WAIT; i++) {
  930                 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
  931                         maxwait_int = i > maxwait_int ? i : maxwait_int;
  932                         return MPT_OK;
  933                 }
  934                 DELAY(100);
  935         }
  936         return (MPT_FAIL);
  937 }
  938 
   939 /* Complain if the IOC is not in the RUNNING state */
  940 void
  941 mpt_check_doorbell(struct mpt_softc *mpt)
  942 {
  943         uint32_t db = mpt_rd_db(mpt);
  944 
  945         if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
  946                 mpt_prt(mpt, "Device not running\n");
  947                 mpt_print_db(db);
  948         }
  949 }
  950 
   951 /* Wait for the IOC to transition to a given state */
  952 static int
  953 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
  954 {
  955         int i;
  956 
  957         for (i = 0; i < MPT_MAX_WAIT; i++) {
  958                 uint32_t db = mpt_rd_db(mpt);
  959                 if (MPT_STATE(db) == state) {
  960                         maxwait_state = i > maxwait_state ? i : maxwait_state;
  961                         return (MPT_OK);
  962                 }
  963                 DELAY(100);
  964         }
  965         return (MPT_FAIL);
  966 }
  967 
  968 /************************* Initialization/Configuration ************************/
  969 static int mpt_download_fw(struct mpt_softc *mpt);
  970 
  971 /* Issue the reset COMMAND to the IOC */
  972 static int
  973 mpt_soft_reset(struct mpt_softc *mpt)
  974 {
  975 
  976         mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
  977 
  978         /* Have to use hard reset if we are not in Running state */
  979         if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
  980                 mpt_prt(mpt, "soft reset failed: device not running\n");
  981                 return (MPT_FAIL);
  982         }
  983 
   984         /* If the doorbell is in use we don't have a chance of getting
  985          * a word in since the IOC probably crashed in message
  986          * processing. So don't waste our time.
  987          */
  988         if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
  989                 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
  990                 return (MPT_FAIL);
  991         }
  992 
  993         /* Send the reset request to the IOC */
  994         mpt_write(mpt, MPT_OFFSET_DOORBELL,
  995             MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
  996         if (mpt_wait_db_ack(mpt) != MPT_OK) {
  997                 mpt_prt(mpt, "soft reset failed: ack timeout\n");
  998                 return (MPT_FAIL);
  999         }
 1000 
 1001         /* Wait for the IOC to reload and come out of reset state */
 1002         if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
 1003                 mpt_prt(mpt, "soft reset failed: device did not restart\n");
 1004                 return (MPT_FAIL);
 1005         }
 1006 
 1007         return MPT_OK;
 1008 }
 1009 
 1010 static int
 1011 mpt_enable_diag_mode(struct mpt_softc *mpt)
 1012 {
 1013         int try;
 1014 
 1015         try = 20;
 1016         while (--try) {
 1017                 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
 1018                         break;
 1019 
 1020                 /* Enable diagnostic registers */
 1021                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
 1022                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
 1023                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
 1024                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
 1025                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
 1026                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
 1027 
 1028                 DELAY(100000);
 1029         }
 1030         if (try == 0)
 1031                 return (EIO);
 1032         return (0);
 1033 }
 1034 
 1035 static void
 1036 mpt_disable_diag_mode(struct mpt_softc *mpt)
 1037 {
 1038 
 1039         mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
 1040 }
 1041 
 1042 /* This is a magic diagnostic reset that resets all the ARM
 1043  * processors in the chip.
 1044  */
 1045 static void
 1046 mpt_hard_reset(struct mpt_softc *mpt)
 1047 {
 1048         int error;
 1049         int wait;
 1050         uint32_t diagreg;
 1051 
 1052         mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
 1053 
 1054         if (mpt->is_1078) {
 1055                 mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
 1056                 DELAY(1000);
 1057                 return;
 1058         }
 1059 
 1060         error = mpt_enable_diag_mode(mpt);
 1061         if (error) {
 1062                 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
 1063                 mpt_prt(mpt, "Trying to reset anyway.\n");
 1064         }
 1065 
 1066         diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 1067 
 1068         /*
 1069          * This appears to be a workaround required for some
 1070          * firmware or hardware revs.
 1071          */
 1072         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
 1073         DELAY(1000);
 1074 
 1075         /* Diag. port is now active so we can now hit the reset bit */
 1076         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
 1077 
 1078         /*
 1079          * Ensure that the reset has finished.  We delay 1ms
 1080          * prior to reading the register to make sure the chip
 1081          * has sufficiently completed its reset to handle register
 1082          * accesses.
 1083          */
 1084         wait = 5000;
 1085         do {
 1086                 DELAY(1000);
 1087                 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 1088         } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
 1089 
 1090         if (wait == 0) {
 1091                 mpt_prt(mpt, "WARNING - Failed hard reset! "
 1092                         "Trying to initialize anyway.\n");
 1093         }
 1094 
 1095         /*
 1096          * If we have firmware to download, it must be loaded before
 1097          * the controller will become operational.  Do so now.
 1098          */
 1099         if (mpt->fw_image != NULL) {
 1100                 error = mpt_download_fw(mpt);
 1101 
 1102                 if (error) {
 1103                         mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
 1104                         mpt_prt(mpt, "Trying to initialize anyway.\n");
 1105                 }
 1106         }
 1107 
 1108         /*
  1109  * Resetting the controller should have disabled write
 1110          * access to the diagnostic registers, but disable
 1111          * manually to be sure.
 1112          */
 1113         mpt_disable_diag_mode(mpt);
 1114 }
 1115 
 1116 static void
 1117 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
 1118 {
 1119 
 1120         /*
 1121          * Complete all pending requests with a status
 1122          * appropriate for an IOC reset.
 1123          */
 1124         mpt_complete_request_chain(mpt, &mpt->request_pending_list,
 1125                                    MPI_IOCSTATUS_INVALID_STATE);
 1126 }
 1127 
 1128 /*
  1129  * Reset the IOC when needed. Try the software command first; if that
  1130  * fails, poke at the magic diagnostic reset. Note that a hard reset
  1131  * resets *both* IOCs on dual-function chips (FC929 && LSI1030) and
  1132  * also fouls up the PCI configuration registers.
 1133  */
 1134 int
 1135 mpt_reset(struct mpt_softc *mpt, int reinit)
 1136 {
 1137         struct  mpt_personality *pers;
 1138         int     ret;
 1139         int     retry_cnt = 0;
 1140 
 1141         /*
 1142          * Try a soft reset. If that fails, get out the big hammer.
 1143          */
 1144  again:
 1145         if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
 1146                 int     cnt;
 1147                 for (cnt = 0; cnt < 5; cnt++) {
 1148                         /* Failed; do a hard reset */
 1149                         mpt_hard_reset(mpt);
 1150 
 1151                         /*
 1152                          * Wait for the IOC to reload
 1153                          * and come out of reset state
 1154                          */
 1155                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1156                         if (ret == MPT_OK) {
 1157                                 break;
 1158                         }
 1159                         /*
 1160                          * Okay- try to check again...
 1161                          */
 1162                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1163                         if (ret == MPT_OK) {
 1164                                 break;
 1165                         }
 1166                         mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
 1167                             retry_cnt, cnt);
 1168                 }
 1169         }
 1170 
 1171         if (retry_cnt == 0) {
 1172                 /*
 1173                  * Invoke reset handlers.  We bump the reset count so
 1174                  * that mpt_wait_req() understands that regardless of
 1175                  * the specified wait condition, it should stop its wait.
 1176                  */
 1177                 mpt->reset_cnt++;
 1178                 MPT_PERS_FOREACH(mpt, pers)
 1179                         pers->reset(mpt, ret);
 1180         }
 1181 
 1182         if (reinit) {
 1183                 ret = mpt_enable_ioc(mpt, 1);
 1184                 if (ret == MPT_OK) {
 1185                         mpt_enable_ints(mpt);
 1186                 }
 1187         }
 1188         if (ret != MPT_OK && retry_cnt++ < 2) {
 1189                 goto again;
 1190         }
 1191         return ret;
 1192 }
 1193 
 1194 /* Return a command buffer to the free queue */
 1195 void
 1196 mpt_free_request(struct mpt_softc *mpt, request_t *req)
 1197 {
 1198         request_t *nxt;
 1199         struct mpt_evtf_record *record;
 1200         uint32_t offset, reply_baddr;
 1201 
 1202         if (req == NULL || req != &mpt->request_pool[req->index]) {
 1203                 panic("mpt_free_request: bad req ptr");
 1204         }
 1205         if ((nxt = req->chain) != NULL) {
 1206                 req->chain = NULL;
 1207                 mpt_free_request(mpt, nxt);     /* NB: recursion */
 1208         }
 1209         KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
 1210         KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
 1211         MPT_LOCK_ASSERT(mpt);
 1212         KASSERT(mpt_req_on_free_list(mpt, req) == 0,
 1213             ("mpt_free_request: req %p:%u func %x already on freelist",
 1214             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1215         KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
 1216             ("mpt_free_request: req %p:%u func %x on pending list",
 1217             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1218 #ifdef  INVARIANTS
 1219         mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
 1220 #endif
 1221 
 1222         req->ccb = NULL;
 1223         if (LIST_EMPTY(&mpt->ack_frames)) {
 1224                 /*
 1225                  * Insert free ones at the tail
 1226                  */
 1227                 req->serno = 0;
 1228                 req->state = REQ_STATE_FREE;
 1229 #ifdef  INVARIANTS
 1230                 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
 1231 #endif
 1232                 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
 1233                 if (mpt->getreqwaiter != 0) {
 1234                         mpt->getreqwaiter = 0;
 1235                         wakeup(&mpt->request_free_list);
 1236                 }
 1237                 return;
 1238         }
 1239 
 1240         /*
 1241          * Process an ack frame deferred due to resource shortage.
 1242          */
 1243         record = LIST_FIRST(&mpt->ack_frames);
 1244         LIST_REMOVE(record, links);
 1245         req->state = REQ_STATE_ALLOCATED;
 1246         mpt_assign_serno(mpt, req);
 1247         mpt_send_event_ack(mpt, req, &record->reply, record->context);
 1248         offset = (uint32_t)((uint8_t *)record - mpt->reply);
 1249         reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
 1250         bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
 1251             MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
 1252         mpt_free_reply(mpt, reply_baddr);
 1253 }
 1254 
 1255 /* Get a command buffer from the free queue */
 1256 request_t *
 1257 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
 1258 {
 1259         request_t *req;
 1260 
 1261 retry:
 1262         MPT_LOCK_ASSERT(mpt);
 1263         req = TAILQ_FIRST(&mpt->request_free_list);
 1264         if (req != NULL) {
 1265                 KASSERT(req == &mpt->request_pool[req->index],
 1266                     ("mpt_get_request: corrupted request free list"));
 1267                 KASSERT(req->state == REQ_STATE_FREE,
 1268                     ("req %p:%u not free on free list %x index %d function %x",
 1269                     req, req->serno, req->state, req->index,
 1270                     ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1271                 TAILQ_REMOVE(&mpt->request_free_list, req, links);
 1272                 req->state = REQ_STATE_ALLOCATED;
 1273                 req->chain = NULL;
 1274                 mpt_assign_serno(mpt, req);
 1275         } else if (sleep_ok != 0) {
 1276                 mpt->getreqwaiter = 1;
 1277                 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
 1278                 goto retry;
 1279         }
 1280         return (req);
 1281 }
 1282 
 1283 /* Pass the command to the IOC */
 1284 void
 1285 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
 1286 {
 1287 
 1288         if (mpt->verbose > MPT_PRT_DEBUG2) {
 1289                 mpt_dump_request(mpt, req);
 1290         }
 1291         bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
 1292             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1293         req->state |= REQ_STATE_QUEUED;
 1294         KASSERT(mpt_req_on_free_list(mpt, req) == 0,
 1295             ("req %p:%u func %x on freelist list in mpt_send_cmd",
 1296             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1297         KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
 1298             ("req %p:%u func %x already on pending list in mpt_send_cmd",
 1299             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1300         TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
 1301         mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
 1302 }
 1303 
 1304 /*
 1305  * Wait for a request to complete.
 1306  *
 1307  * Inputs:
  1308  *      mpt, req        controller softc and the request to wait for
  1309  *      state, mask     wait until (req->state & mask) == state
  1310  *      sleep_ok        nonzero implies may sleep in this context
  1311  *      time_ms         timeout in ms.  0 implies no timeout.
 1312  *
 1313  * Return Values:
 1314  *      0               Request completed
 1315  *      non-0           Timeout fired before request completion.
 1316  */
 1317 int
 1318 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
 1319              mpt_req_state_t state, mpt_req_state_t mask,
 1320              int sleep_ok, int time_ms)
 1321 {
 1322         int   timeout;
 1323         u_int saved_cnt;
 1324         sbintime_t sbt;
 1325 
 1326         /*
 1327          * time_ms is in ms, 0 indicates infinite wait.
 1328          * Convert to sbintime_t or 500us units depending on
 1329          * our sleep mode.
 1330          */
 1331         if (sleep_ok != 0) {
 1332                 sbt = SBT_1MS * time_ms;
 1333                 /* Set timeout as well so final timeout check works. */
 1334                 timeout = time_ms;
 1335         } else {
 1336                 sbt = 0; /* Squelch bogus gcc warning. */
 1337                 timeout = time_ms * 2;
 1338         }
 1339         req->state |= REQ_STATE_NEED_WAKEUP;
 1340         mask &= ~REQ_STATE_NEED_WAKEUP;
 1341         saved_cnt = mpt->reset_cnt;
 1342         while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
 1343                 if (sleep_ok != 0) {
 1344                         if (mpt_sleep(mpt, req, PUSER, "mptreq", sbt) ==
 1345                             EWOULDBLOCK) {
 1346                                 timeout = 0;
 1347                                 break;
 1348                         }
 1349                 } else {
 1350                         if (time_ms != 0 && --timeout == 0) {
 1351                                 break;
 1352                         }
 1353                         DELAY(500);
 1354                         mpt_intr(mpt);
 1355                 }
 1356         }
 1357         req->state &= ~REQ_STATE_NEED_WAKEUP;
 1358         if (mpt->reset_cnt != saved_cnt) {
 1359                 return (EIO);
 1360         }
 1361         if (time_ms && timeout <= 0) {
 1362                 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
 1363                 req->state |= REQ_STATE_TIMEDOUT;
 1364                 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
 1365                 return (ETIMEDOUT);
 1366         }
 1367         return (0);
 1368 }
 1369 
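       /*
        * Illustrative only, not part of the driver: the canonical
        * request life cycle built from the routines above.  Error
        * recovery is elided and the message-construction step is a
        * placeholder.
        *
        *	req = mpt_get_request(mpt, TRUE);	(may sleep)
        *	if (req == NULL)
        *		return (ENOMEM);
        *	... build an MPI request in req->req_vbuf ...
        *	mpt_check_doorbell(mpt);
        *	mpt_send_cmd(mpt, req);
        *	if (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
        *	    TRUE, 5000) != 0)
        *		return (ETIMEDOUT);	(request stays with the IOC)
        *	mpt_free_request(mpt, req);
        */
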
 1370 /*
 1371  * Send a command to the IOC via the handshake register.
 1372  *
 1373  * Only done at initialization time and for certain unusual
 1374  * commands such as device/bus reset as specified by LSI.
 1375  */
 1376 int
 1377 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
 1378 {
 1379         int i;
 1380         uint32_t data, *data32;
 1381 
 1382         /* Check condition of the IOC */
 1383         data = mpt_rd_db(mpt);
 1384         if ((MPT_STATE(data) != MPT_DB_STATE_READY
 1385           && MPT_STATE(data) != MPT_DB_STATE_RUNNING
 1386           && MPT_STATE(data) != MPT_DB_STATE_FAULT)
 1387          || MPT_DB_IS_IN_USE(data)) {
 1388                 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
 1389                 mpt_print_db(data);
 1390                 return (EBUSY);
 1391         }
 1392 
 1393         /* We move things in 32 bit chunks */
 1394         len = (len + 3) >> 2;
 1395         data32 = cmd;
 1396 
 1397         /* Clear any left over pending doorbell interrupts */
 1398         if (MPT_DB_INTR(mpt_rd_intr(mpt)))
 1399                 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1400 
 1401         /*
 1402          * Tell the handshake reg. we are going to send a command
 1403          * and how long it is going to be.
 1404          */
 1405         data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
 1406             (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
 1407         mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
 1408 
 1409         /* Wait for the chip to notice */
 1410         if (mpt_wait_db_int(mpt) != MPT_OK) {
 1411                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
 1412                 return (ETIMEDOUT);
 1413         }
 1414 
 1415         /* Clear the interrupt */
 1416         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1417 
 1418         if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1419                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
 1420                 return (ETIMEDOUT);
 1421         }
 1422 
 1423         /* Send the command */
 1424         for (i = 0; i < len; i++) {
 1425                 mpt_write_stream(mpt, MPT_OFFSET_DOORBELL, *data32++);
 1426                 if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1427                         mpt_prt(mpt,
 1428                             "mpt_send_handshake_cmd: timeout @ index %d\n", i);
 1429                         return (ETIMEDOUT);
 1430                 }
 1431         }
  1432         return (MPT_OK);
 1433 }
 1434 
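       /*
        * Worked example (sizes assumed for illustration): a 24-byte
        * command rounds up to (24 + 3) >> 2 = 6 dwords, so the first
        * doorbell write encodes
        *
        *	(MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
        *	(6 << MPI_DOORBELL_ADD_DWORDS_SHIFT)
        *
        * after which each of the 6 dwords is streamed through the
        * doorbell register, waiting for an ack between writes.
        */
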
 1435 /* Get the response from the handshake register */
 1436 int
 1437 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
 1438 {
 1439         int left, reply_left;
 1440         u_int16_t *data16;
 1441         uint32_t data;
 1442         MSG_DEFAULT_REPLY *hdr;
 1443 
 1444         /* We move things out in 16 bit chunks */
 1445         reply_len >>= 1;
 1446         data16 = (u_int16_t *)reply;
 1447 
 1448         hdr = (MSG_DEFAULT_REPLY *)reply;
 1449 
 1450         /* Get first word */
 1451         if (mpt_wait_db_int(mpt) != MPT_OK) {
  1452                 mpt_prt(mpt, "mpt_recv_handshake_reply timeout1\n");
  1453                 return (ETIMEDOUT);
 1454         }
 1455         data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1456         *data16++ = le16toh(data & MPT_DB_DATA_MASK);
 1457         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1458 
 1459         /* Get second word */
 1460         if (mpt_wait_db_int(mpt) != MPT_OK) {
  1461                 mpt_prt(mpt, "mpt_recv_handshake_reply timeout2\n");
  1462                 return (ETIMEDOUT);
 1463         }
 1464         data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1465         *data16++ = le16toh(data & MPT_DB_DATA_MASK);
 1466         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1467 
 1468         /*
 1469          * With the second word, we can now look at the length.
 1470          * Warn about a reply that's too short (except for IOC FACTS REPLY)
 1471          */
 1472         if ((reply_len >> 1) != hdr->MsgLength &&
 1473             (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
 1474                 mpt_prt(mpt, "reply length does not match message length: "
 1475                         "got %x; expected %zx for function %x\n",
 1476                         hdr->MsgLength << 2, reply_len << 1, hdr->Function);
 1477         }
 1478 
  1479         /* Get the rest of the reply, but don't overflow the provided buffer */
 1480         left = (hdr->MsgLength << 1) - 2;
  1481         reply_left = reply_len - 2;
 1482         while (left--) {
 1483                 if (mpt_wait_db_int(mpt) != MPT_OK) {
  1484                         mpt_prt(mpt, "mpt_recv_handshake_reply timeout3\n");
  1485                         return (ETIMEDOUT);
 1486                 }
 1487                 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1488                 if (reply_left-- > 0)
 1489                         *data16++ = le16toh(data & MPT_DB_DATA_MASK);
 1490                 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1491         }
 1492 
 1493         /* One more wait & clear at the end */
 1494         if (mpt_wait_db_int(mpt) != MPT_OK) {
  1495                 mpt_prt(mpt, "mpt_recv_handshake_reply timeout4\n");
  1496                 return (ETIMEDOUT);
 1497         }
 1498         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1499 
 1500         if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1501                 if (mpt->verbose >= MPT_PRT_TRACE)
 1502                         mpt_print_reply(hdr);
 1503                 return (MPT_FAIL | hdr->IOCStatus);
 1504         }
 1505 
 1506         return (0);
 1507 }
 1508 
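       /*
        * Unit bookkeeping in the routine above, with assumed numbers:
        * reply_len arrives in bytes and is immediately halved into
        * 16-bit words, while hdr->MsgLength counts 32-bit dwords.  A
        * 16-byte buffer becomes 8 words; the length check compares
        * 8 >> 1 == 4 against MsgLength; and once the two header words
        * are in, (MsgLength << 1) - 2 == 6 words remain to be drained
        * from the doorbell.
        */
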
 1509 static int
 1510 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
 1511 {
 1512         MSG_IOC_FACTS f_req;
 1513         int error;
 1514 
 1515         memset(&f_req, 0, sizeof f_req);
 1516         f_req.Function = MPI_FUNCTION_IOC_FACTS;
 1517         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1518         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1519         if (error) {
 1520                 return(error);
 1521         }
 1522         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1523         return (error);
 1524 }
 1525 
 1526 static int
 1527 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
 1528 {
 1529         MSG_PORT_FACTS f_req;
 1530         int error;
 1531 
 1532         memset(&f_req, 0, sizeof f_req);
 1533         f_req.Function = MPI_FUNCTION_PORT_FACTS;
 1534         f_req.PortNumber = port;
 1535         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1536         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1537         if (error) {
 1538                 return(error);
 1539         }
 1540         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1541         return (error);
 1542 }
 1543 
 1544 /*
 1545  * Send the initialization request. This is where we specify how many
 1546  * SCSI buses and how many devices per bus we wish to emulate.
 1547  * This is also the command that specifies the max size of the reply
 1548  * frames from the IOC that we will be allocating.
 1549  */
 1550 static int
 1551 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
 1552 {
 1553         int error = 0;
 1554         MSG_IOC_INIT init;
 1555         MSG_IOC_INIT_REPLY reply;
 1556 
 1557         memset(&init, 0, sizeof init);
 1558         init.WhoInit = who;
 1559         init.Function = MPI_FUNCTION_IOC_INIT;
 1560         init.MaxDevices = 0;    /* at least 256 devices per bus */
 1561         init.MaxBuses = 16;     /* at least 16 buses */
 1562 
 1563         init.MsgVersion = htole16(MPI_VERSION);
 1564         init.HeaderVersion = htole16(MPI_HEADER_VERSION);
 1565         init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
 1566         init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1567 
 1568         if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
 1569                 return(error);
 1570         }
 1571 
 1572         error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
 1573         return (error);
 1574 }
 1575 
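       /*
        * A minimal sketch of the expected call, with
        * MPI_WHOINIT_HOST_DRIVER identifying the host driver as the
        * initializing entity (the concrete call site is in
        * mpt_configure_ioc(), later in this file):
        *
        *	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK)
        *		... retry or fail the attach ...
        */
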
 1576 /*
  1577  * Utility routine to read configuration headers and pages
 1578  */
 1579 int
 1580 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
 1581                   bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
 1582 {
 1583         MSG_CONFIG *cfgp;
 1584         SGE_SIMPLE32 *se;
 1585 
 1586         cfgp = req->req_vbuf;
 1587         memset(cfgp, 0, sizeof *cfgp);
 1588         cfgp->Action = params->Action;
 1589         cfgp->Function = MPI_FUNCTION_CONFIG;
 1590         cfgp->Header.PageVersion = params->PageVersion;
 1591         cfgp->Header.PageNumber = params->PageNumber;
 1592         cfgp->PageAddress = htole32(params->PageAddress);
 1593         if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
 1594             MPI_CONFIG_PAGETYPE_EXTENDED) {
 1595                 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1596                 cfgp->Header.PageLength = 0;
 1597                 cfgp->ExtPageLength = htole16(params->ExtPageLength);
 1598                 cfgp->ExtPageType = params->ExtPageType;
 1599         } else {
 1600                 cfgp->Header.PageType = params->PageType;
 1601                 cfgp->Header.PageLength = params->PageLength;
 1602         }
 1603         se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
 1604         se->Address = htole32(addr);
 1605         MPI_pSGE_SET_LENGTH(se, len);
 1606         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 1607             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
 1608             MPI_SGE_FLAGS_END_OF_LIST |
 1609             ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
 1610           || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
 1611            ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
 1612         se->FlagsLength = htole32(se->FlagsLength);
 1613         cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 1614 
 1615         mpt_check_doorbell(mpt);
 1616         mpt_send_cmd(mpt, req);
 1617         return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 1618                              sleep_ok, timeout_ms));
 1619 }
 1620 
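       /*
        * Config access is a two-step exchange built on the routine
        * above, sketched here with hypothetical arguments and a
        * hypothetical full-size page buffer "page" (the concrete
        * header/page readers follow): fetch the page header first to
        * learn its version and length, then reissue with a buffer
        * sized from that header.
        *
        *	mpt_read_cfg_header(mpt, type, num, addr, &page->Header,
        *	    FALSE, 5000);
        *	mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        *	    addr, &page->Header, page->Header.PageLength << 2,
        *	    FALSE, 5000);
        */
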
 1621 int
 1622 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
 1623                        uint32_t PageAddress, int ExtPageType,
 1624                        CONFIG_EXTENDED_PAGE_HEADER *rslt,
 1625                        int sleep_ok, int timeout_ms)
 1626 {
 1627         request_t  *req;
 1628         cfgparms_t params;
 1629         MSG_CONFIG_REPLY *cfgp;
 1630         int         error;
 1631 
 1632         req = mpt_get_request(mpt, sleep_ok);
 1633         if (req == NULL) {
 1634                 mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
 1635                 return (ENOMEM);
 1636         }
 1637 
 1638         params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
 1639         params.PageVersion = PageVersion;
 1640         params.PageLength = 0;
 1641         params.PageNumber = PageNumber;
 1642         params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1643         params.PageAddress = PageAddress;
 1644         params.ExtPageType = ExtPageType;
 1645         params.ExtPageLength = 0;
 1646         error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
 1647                                   sleep_ok, timeout_ms);
 1648         if (error != 0) {
 1649                 /*
 1650                  * Leave the request. Without resetting the chip, it's
 1651                  * still owned by it and we'll just get into trouble
 1652                  * freeing it now. Mark it as abandoned so that if it
 1653                  * shows up later it can be freed.
 1654                  */
 1655                 mpt_prt(mpt, "read_extcfg_header timed out\n");
 1656                 return (ETIMEDOUT);
 1657         }
 1658 
 1659         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
 1660         case MPI_IOCSTATUS_SUCCESS:
 1661                 cfgp = req->req_vbuf;
 1662                 rslt->PageVersion = cfgp->Header.PageVersion;
 1663                 rslt->PageNumber = cfgp->Header.PageNumber;
 1664                 rslt->PageType = cfgp->Header.PageType;
 1665                 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
 1666                 rslt->ExtPageType = cfgp->ExtPageType;
 1667                 error = 0;
 1668                 break;
 1669         case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
 1670                 mpt_lprt(mpt, MPT_PRT_DEBUG,
 1671                     "Invalid Page Type %d Number %d Addr 0x%0x\n",
 1672                     MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
 1673                 error = EINVAL;
 1674                 break;
 1675         default:
 1676                 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
 1677                         req->IOCStatus);
 1678                 error = EIO;
 1679                 break;
 1680         }
 1681         mpt_free_request(mpt, req);
 1682         return (error);
 1683 }
 1684 
 1685 int
 1686 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1687                      CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
 1688                      int sleep_ok, int timeout_ms)
 1689 {
 1690         request_t    *req;
 1691         cfgparms_t    params;
 1692         int           error;
 1693 
 1694         req = mpt_get_request(mpt, sleep_ok);
 1695         if (req == NULL) {
 1696                 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
 1697                 return (-1);
 1698         }
 1699 
 1700         params.Action = Action;
 1701         params.PageVersion = hdr->PageVersion;
 1702         params.PageLength = 0;
 1703         params.PageNumber = hdr->PageNumber;
 1704         params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1705         params.PageAddress = PageAddress;
 1706         params.ExtPageType = hdr->ExtPageType;
 1707         params.ExtPageLength = hdr->ExtPageLength;
 1708         error = mpt_issue_cfg_req(mpt, req, &params,
 1709                                   req->req_pbuf + MPT_RQSL(mpt),
 1710                                   len, sleep_ok, timeout_ms);
 1711         if (error != 0) {
 1712                 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
 1713                 return (-1);
 1714         }
 1715 
 1716         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1717                 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
 1718                         req->IOCStatus);
 1719                 mpt_free_request(mpt, req);
 1720                 return (-1);
 1721         }
 1722         memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
 1723         mpt_free_request(mpt, req);
 1724         return (0);
 1725 }
 1726 
 1727 int
 1728 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
 1729                     uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
 1730                     int sleep_ok, int timeout_ms)
 1731 {
 1732         request_t  *req;
 1733         cfgparms_t params;
 1734         MSG_CONFIG *cfgp;
 1735         int         error;
 1736 
 1737         req = mpt_get_request(mpt, sleep_ok);
 1738         if (req == NULL) {
 1739                 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
 1740                 return (ENOMEM);
 1741         }
 1742 
 1743         params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
 1744         params.PageVersion = 0;
 1745         params.PageLength = 0;
 1746         params.PageNumber = PageNumber;
 1747         params.PageType = PageType;
 1748         params.PageAddress = PageAddress;
 1749         error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
 1750                                   sleep_ok, timeout_ms);
 1751         if (error != 0) {
 1752                 /*
 1753                  * Leave the request. Without resetting the chip, it's
 1754                  * still owned by it and we'll just get into trouble
 1755                  * freeing it now. Mark it as abandoned so that if it
 1756                  * shows up later it can be freed.
 1757                  */
 1758                 mpt_prt(mpt, "read_cfg_header timed out\n");
 1759                 return (ETIMEDOUT);
 1760         }
 1761 
 1762         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
 1763         case MPI_IOCSTATUS_SUCCESS:
 1764                 cfgp = req->req_vbuf;
 1765                 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
 1766                 error = 0;
 1767                 break;
 1768         case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
 1769                 mpt_lprt(mpt, MPT_PRT_DEBUG,
 1770                     "Invalid Page Type %d Number %d Addr 0x%0x\n",
 1771                     PageType, PageNumber, PageAddress);
 1772                 error = EINVAL;
 1773                 break;
 1774         default:
 1775                 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
 1776                         req->IOCStatus);
 1777                 error = EIO;
 1778                 break;
 1779         }
 1780         mpt_free_request(mpt, req);
 1781         return (error);
 1782 }
 1783 
 1784 int
 1785 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1786                   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
 1787                   int timeout_ms)
 1788 {
 1789         request_t    *req;
 1790         cfgparms_t    params;
 1791         int           error;
 1792 
 1793         req = mpt_get_request(mpt, sleep_ok);
 1794         if (req == NULL) {
 1795                 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
 1796                 return (-1);
 1797         }
 1798 
 1799         params.Action = Action;
 1800         params.PageVersion = hdr->PageVersion;
 1801         params.PageLength = hdr->PageLength;
 1802         params.PageNumber = hdr->PageNumber;
 1803         params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
 1804         params.PageAddress = PageAddress;
 1805         error = mpt_issue_cfg_req(mpt, req, &params,
 1806                                   req->req_pbuf + MPT_RQSL(mpt),
 1807                                   len, sleep_ok, timeout_ms);
 1808         if (error != 0) {
 1809                 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
 1810                 return (-1);
 1811         }
 1812 
 1813         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1814                 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
 1815                         req->IOCStatus);
 1816                 mpt_free_request(mpt, req);
 1817                 return (-1);
 1818         }
 1819         memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
 1820         mpt_free_request(mpt, req);
 1821         return (0);
 1822 }
 1823 
 1824 int
 1825 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1826                    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
 1827                    int timeout_ms)
 1828 {
 1829         request_t    *req;
 1830         cfgparms_t    params;
 1831         u_int         hdr_attr;
 1832         int           error;
 1833 
 1834         hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
 1835         if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
 1836             hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
 1837                 mpt_prt(mpt, "page type 0x%x not changeable\n",
 1838                         hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
 1839                 return (-1);
 1840         }
 1841 
 1842 #if     0
 1843         /*
 1844          * We shouldn't mask off other bits here.
 1845          */
 1846         hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
 1847 #endif
 1848 
 1849         req = mpt_get_request(mpt, sleep_ok);
 1850         if (req == NULL)
 1851                 return (-1);
 1852 
 1853         memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
 1854 
 1855         /*
 1856          * There isn't any point in restoring stripped out attributes
 1857          * if you then mask them going down to issue the request.
 1858          */
 1859 
 1860         params.Action = Action;
 1861         params.PageVersion = hdr->PageVersion;
 1862         params.PageLength = hdr->PageLength;
 1863         params.PageNumber = hdr->PageNumber;
 1864         params.PageAddress = PageAddress;
 1865 #if     0
 1866         /* Restore stripped out attributes */
 1867         hdr->PageType |= hdr_attr;
 1868         params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
 1869 #else
 1870         params.PageType = hdr->PageType;
 1871 #endif
 1872         error = mpt_issue_cfg_req(mpt, req, &params,
 1873                                   req->req_pbuf + MPT_RQSL(mpt),
 1874                                   len, sleep_ok, timeout_ms);
 1875         if (error != 0) {
 1876                 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
 1877                 return (-1);
 1878         }
 1879 
 1880         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1881                 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
 1882                         req->IOCStatus);
 1883                 mpt_free_request(mpt, req);
 1884                 return (-1);
 1885         }
 1886         mpt_free_request(mpt, req);
 1887         return (0);
 1888 }
 1889 
 1890 /*
 1891  * Read IOC configuration information
 1892  */
 1893 static int
 1894 mpt_read_config_info_ioc(struct mpt_softc *mpt)
 1895 {
 1896         CONFIG_PAGE_HEADER hdr;
 1897         struct mpt_raid_volume *mpt_raid;
 1898         int rv;
 1899         int i;
 1900         size_t len;
 1901 
 1902         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
 1903                 2, 0, &hdr, FALSE, 5000);
 1904         /*
 1905          * If it's an invalid page, so what? Not a supported function....
 1906          */
 1907         if (rv == EINVAL) {
 1908                 return (0);
 1909         }
 1910         if (rv) {
 1911                 return (rv);
 1912         }
 1913 
 1914         mpt_lprt(mpt, MPT_PRT_DEBUG,
 1915             "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
 1916             hdr.PageVersion, hdr.PageLength << 2,
 1917             hdr.PageNumber, hdr.PageType);
 1918 
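               /* PageLength counts 32-bit dwords; convert to a byte count. */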
 1919         len = hdr.PageLength * sizeof(uint32_t);
 1920         mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 1921         if (mpt->ioc_page2 == NULL) {
 1922                 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
 1923                 mpt_raid_free_mem(mpt);
 1924                 return (ENOMEM);
 1925         }
 1926         memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
 1927         rv = mpt_read_cur_cfg_page(mpt, 0,
 1928             &mpt->ioc_page2->Header, len, FALSE, 5000);
 1929         if (rv) {
 1930                 mpt_prt(mpt, "failed to read IOC Page 2\n");
 1931                 mpt_raid_free_mem(mpt);
 1932                 return (EIO);
 1933         }
 1934         mpt2host_config_page_ioc2(mpt->ioc_page2);
 1935 
 1936         if (mpt->ioc_page2->CapabilitiesFlags != 0) {
 1937                 uint32_t mask;
 1938 
 1939                 mpt_prt(mpt, "Capabilities: (");
 1940                 for (mask = 1; mask != 0; mask <<= 1) {
 1941                         if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
 1942                                 continue;
 1943                         }
 1944                         switch (mask) {
 1945                         case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
 1946                                 mpt_prtc(mpt, " RAID-0");
 1947                                 break;
 1948                         case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
 1949                                 mpt_prtc(mpt, " RAID-1E");
 1950                                 break;
 1951                         case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
 1952                                 mpt_prtc(mpt, " RAID-1");
 1953                                 break;
 1954                         case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
 1955                                 mpt_prtc(mpt, " SES");
 1956                                 break;
 1957                         case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
 1958                                 mpt_prtc(mpt, " SAFTE");
 1959                                 break;
 1960                         case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
  1961                                 mpt_prtc(mpt, " Multi-Channel-Arrays");
                                       break;
  1962                         default:
 1963                                 break;
 1964                         }
 1965                 }
 1966                 mpt_prtc(mpt, " )\n");
 1967                 if ((mpt->ioc_page2->CapabilitiesFlags
 1968                    & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
 1969                     | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
 1970                     | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
 1971                         mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
 1972                                 mpt->ioc_page2->NumActiveVolumes,
 1973                                 mpt->ioc_page2->NumActiveVolumes != 1
 1974                               ? "s " : " ",
 1975                                 mpt->ioc_page2->MaxVolumes);
 1976                         mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
 1977                                 mpt->ioc_page2->NumActivePhysDisks,
 1978                                 mpt->ioc_page2->NumActivePhysDisks != 1
 1979                               ? "s " : " ",
 1980                                 mpt->ioc_page2->MaxPhysDisks);
 1981                 }
 1982         }
 1983 
 1984         len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
 1985         mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 1986         if (mpt->raid_volumes == NULL) {
 1987                 mpt_prt(mpt, "Could not allocate RAID volume data\n");
 1988                 mpt_raid_free_mem(mpt);
 1989                 return (ENOMEM);
 1990         }
 1991 
 1992         /*
 1993          * Copy critical data out of ioc_page2 so that we can
 1994          * safely refresh the page without windows of unreliable
 1995          * data.
 1996          */
  1997         mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
 1998 
 1999         len = sizeof(*mpt->raid_volumes->config_page) +
 2000             (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
 2001         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
 2002                 mpt_raid = &mpt->raid_volumes[i];
 2003                 mpt_raid->config_page =
 2004                     malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2005                 if (mpt_raid->config_page == NULL) {
 2006                         mpt_prt(mpt, "Could not allocate RAID page data\n");
 2007                         mpt_raid_free_mem(mpt);
 2008                         return (ENOMEM);
 2009                 }
 2010         }
 2011         mpt->raid_page0_len = len;
 2012 
 2013         len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
 2014         mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2015         if (mpt->raid_disks == NULL) {
 2016                 mpt_prt(mpt, "Could not allocate RAID disk data\n");
 2017                 mpt_raid_free_mem(mpt);
 2018                 return (ENOMEM);
 2019         }
  2020         mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
 2021 
 2022         /*
 2023          * Load page 3.
 2024          */
 2025         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
 2026             3, 0, &hdr, FALSE, 5000);
 2027         if (rv) {
 2028                 mpt_raid_free_mem(mpt);
 2029                 return (EIO);
 2030         }
 2031 
 2032         mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
 2033             hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
 2034 
 2035         len = hdr.PageLength * sizeof(uint32_t);
 2036         mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2037         if (mpt->ioc_page3 == NULL) {
 2038                 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
 2039                 mpt_raid_free_mem(mpt);
 2040                 return (ENOMEM);
 2041         }
 2042         memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
 2043         rv = mpt_read_cur_cfg_page(mpt, 0,
 2044             &mpt->ioc_page3->Header, len, FALSE, 5000);
 2045         if (rv) {
 2046                 mpt_raid_free_mem(mpt);
 2047                 return (EIO);
 2048         }
 2049         mpt2host_config_page_ioc3(mpt->ioc_page3);
 2050         mpt_raid_wakeup(mpt);
 2051         return (0);
 2052 }
 2053 
 2054 /*
 2055  * Enable IOC port
 2056  */
 2057 static int
 2058 mpt_send_port_enable(struct mpt_softc *mpt, int port)
 2059 {
 2060         request_t       *req;
 2061         MSG_PORT_ENABLE *enable_req;
 2062         int              error;
 2063 
 2064         req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
 2065         if (req == NULL)
 2066                 return (-1);
 2067 
 2068         enable_req = req->req_vbuf;
  2069         memset(enable_req, 0, MPT_RQSL(mpt));
 2070 
 2071         enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
 2072         enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 2073         enable_req->PortNumber = port;
 2074 
 2075         mpt_check_doorbell(mpt);
 2076         mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
 2077 
 2078         mpt_send_cmd(mpt, req);
 2079         error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
  2080             FALSE, (mpt->is_sas || mpt->is_fc) ? 300000 : 30000);
 2081         if (error != 0) {
 2082                 mpt_prt(mpt, "port %d enable timed out\n", port);
 2083                 return (-1);
 2084         }
 2085         mpt_free_request(mpt, req);
 2086         mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
 2087         return (0);
 2088 }
 2089 
 2090 /*
 2091  * Enable/Disable asynchronous event reporting.
 2092  */
 2093 static int
 2094 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
 2095 {
 2096         request_t *req;
 2097         MSG_EVENT_NOTIFY *enable_req;
 2098 
 2099         req = mpt_get_request(mpt, FALSE);
 2100         if (req == NULL) {
 2101                 return (ENOMEM);
 2102         }
 2103         enable_req = req->req_vbuf;
 2104         memset(enable_req, 0, sizeof *enable_req);
 2105 
 2106         enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
 2107         enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
 2108         enable_req->Switch     = onoff;
 2109 
 2110         mpt_check_doorbell(mpt);
 2111         mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
 2112             onoff ? "en" : "dis");
 2113         /*
 2114          * Send the command off, but don't wait for it.
 2115          */
 2116         mpt_send_cmd(mpt, req);
 2117         return (0);
 2118 }
 2119 
 2120 /*
 2121  * Un-mask the interrupts on the chip.
 2122  */
 2123 void
 2124 mpt_enable_ints(struct mpt_softc *mpt)
 2125 {
 2126 
  2127         /* Unmask everything except the doorbell interrupt */
 2128         mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
 2129 }
 2130 
 2131 /*
 2132  * Mask the interrupts on the chip.
 2133  */
 2134 void
 2135 mpt_disable_ints(struct mpt_softc *mpt)
 2136 {
 2137 
 2138         /* Mask all interrupts */
 2139         mpt_write(mpt, MPT_OFFSET_INTR_MASK,
 2140             MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
 2141 }
 2142 
 2143 static void
 2144 mpt_sysctl_attach(struct mpt_softc *mpt)
 2145 {
 2146         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
 2147         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
 2148 
 2149         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2150                        "debug", CTLFLAG_RW, &mpt->verbose, 0,
 2151                        "Debugging/Verbose level");
 2152         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2153                        "role", CTLFLAG_RD, &mpt->role, 0,
 2154                        "HBA role");
 2155 #ifdef  MPT_TEST_MULTIPATH
 2156         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2157                        "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
 2158                        "Next Target to Fail");
 2159 #endif
 2160 }
 2161 
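       /*
        * These land under the device's sysctl tree; e.g., assuming
        * unit 0, "sysctl dev.mpt.0.debug=3" raises the verbosity at
        * run time.
        */
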
 2162 int
 2163 mpt_attach(struct mpt_softc *mpt)
 2164 {
 2165         struct mpt_personality *pers;
 2166         int i;
 2167         int error;
 2168 
 2169         mpt_core_attach(mpt);
 2170         mpt_core_enable(mpt);
 2171 
 2172         TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
 2173         for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
 2174                 pers = mpt_personalities[i];
 2175                 if (pers == NULL) {
 2176                         continue;
 2177                 }
 2178                 if (pers->probe(mpt) == 0) {
 2179                         error = pers->attach(mpt);
 2180                         if (error != 0) {
 2181                                 mpt_detach(mpt);
 2182                                 return (error);
 2183                         }
 2184                         mpt->mpt_pers_mask |= (0x1 << pers->id);
 2185                         pers->use_count++;
 2186                 }
 2187         }
 2188 
 2189         /*
 2190          * Now that we've attached everything, do the enable function
 2191          * for all of the personalities. This allows the personalities
 2192          * to do setups that are appropriate for them prior to enabling
 2193          * any ports.
 2194          */
 2195         for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
 2196                 pers = mpt_personalities[i];
  2197                 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
 2198                         error = pers->enable(mpt);
 2199                         if (error != 0) {
 2200                                 mpt_prt(mpt, "personality %s attached but would"
 2201                                     " not enable (%d)\n", pers->name, error);
 2202                                 mpt_detach(mpt);
 2203                                 return (error);
 2204                         }
 2205                 }
 2206         }
 2207         return (0);
 2208 }
 2209 
 2210 int
 2211 mpt_shutdown(struct mpt_softc *mpt)
 2212 {
 2213         struct mpt_personality *pers;
 2214 
 2215         MPT_PERS_FOREACH_REVERSE(mpt, pers) {
 2216                 pers->shutdown(mpt);
 2217         }
 2218         return (0);
 2219 }
 2220 
 2221 int
 2222 mpt_detach(struct mpt_softc *mpt)
 2223 {
 2224         struct mpt_personality *pers;
 2225 
 2226         MPT_PERS_FOREACH_REVERSE(mpt, pers) {
 2227                 pers->detach(mpt);
 2228                 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
 2229                 pers->use_count--;
 2230         }
 2231         TAILQ_REMOVE(&mpt_tailq, mpt, links);
 2232         return (0);
 2233 }
 2234 
 2235 static int
 2236 mpt_core_load(struct mpt_personality *pers)
 2237 {
 2238         int i;
 2239 
 2240         /*
 2241          * Setup core handlers and insert the default handler
 2242          * into all "empty slots".
 2243          */
 2244         for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
 2245                 mpt_reply_handlers[i] = mpt_default_reply_handler;
 2246         }
 2247 
 2248         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
 2249             mpt_event_reply_handler;
 2250         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
 2251             mpt_config_reply_handler;
 2252         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
 2253             mpt_handshake_reply_handler;
 2254         return (0);
 2255 }
 2256 
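       /*
        * Reply routing, for orientation: requests embed one of the
        * MPT_REPLY_HANDLER_* values in MsgContext (see, e.g., the
        * "req->index | MPT_REPLY_HANDLER_CONFIG" in mpt_issue_cfg_req()
        * above), and MPT_CBI() recovers the table index so completions
        * dispatch through mpt_reply_handlers[].
        */
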
 2257 /*
 2258  * Initialize per-instance driver data and perform
 2259  * initial controller configuration.
 2260  */
 2261 static int
 2262 mpt_core_attach(struct mpt_softc *mpt)
 2263 {
 2264         int val, error;
 2265 
 2266         LIST_INIT(&mpt->ack_frames);
 2267         /* Put all request buffers on the free list */
 2268         TAILQ_INIT(&mpt->request_pending_list);
 2269         TAILQ_INIT(&mpt->request_free_list);
 2270         TAILQ_INIT(&mpt->request_timeout_list);
 2271         for (val = 0; val < MPT_MAX_LUNS; val++) {
 2272                 STAILQ_INIT(&mpt->trt[val].atios);
 2273                 STAILQ_INIT(&mpt->trt[val].inots);
 2274         }
 2275         STAILQ_INIT(&mpt->trt_wildcard.atios);
 2276         STAILQ_INIT(&mpt->trt_wildcard.inots);
 2277 #ifdef  MPT_TEST_MULTIPATH
 2278         mpt->failure_id = -1;
 2279 #endif
 2280         mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
 2281         mpt_sysctl_attach(mpt);
 2282         mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
 2283             mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
 2284 
 2285         MPT_LOCK(mpt);
 2286         error = mpt_configure_ioc(mpt, 0, 0);
 2287         MPT_UNLOCK(mpt);
 2288 
 2289         return (error);
 2290 }
 2291 
 2292 static int
 2293 mpt_core_enable(struct mpt_softc *mpt)
 2294 {
 2295 
 2296         /*
 2297          * We enter with the IOC enabled, but async events
 2298          * not enabled, ports not enabled and interrupts
 2299          * not enabled.
 2300          */
 2301         MPT_LOCK(mpt);
 2302 
 2303         /*
 2304          * Enable asynchronous event reporting- all personalities
 2305          * have attached so that they should be able to now field
 2306          * async events.
 2307          */
 2308         mpt_send_event_request(mpt, 1);
 2309 
 2310         /*
 2311          * Catch any pending interrupts
 2312          *
  2313          * This seems to be crucial; otherwise
  2314          * the port enable below times out.
 2315          */
 2316         mpt_intr(mpt);
 2317 
 2318         /*
 2319          * Enable Interrupts
 2320          */
 2321         mpt_enable_ints(mpt);
 2322 
 2323         /*
  2324          * Catch any interrupts that arrived while we
  2325          * were unmasking them; as above, this seems to
  2326          * be crucial, or the port enable below times
  2327          * out.
 2328          */
 2329         mpt_intr(mpt);
 2330 
 2331         /*
 2332          * Enable the port.
 2333          */
 2334         if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
 2335                 mpt_prt(mpt, "failed to enable port 0\n");
 2336                 MPT_UNLOCK(mpt);
 2337                 return (ENXIO);
 2338         }
 2339         MPT_UNLOCK(mpt);
 2340         return (0);
 2341 }
 2342 
 2343 static void
 2344 mpt_core_shutdown(struct mpt_softc *mpt)
 2345 {
 2346 
 2347         mpt_disable_ints(mpt);
 2348 }
 2349 
 2350 static void
 2351 mpt_core_detach(struct mpt_softc *mpt)
 2352 {
 2353         int val;
 2354 
 2355         /*
 2356          * XXX: FREE MEMORY 
 2357          */
 2358         mpt_disable_ints(mpt);
 2359 
 2360         /* Make sure no request has pending timeouts. */
 2361         for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 2362                 request_t *req = &mpt->request_pool[val];
 2363                 mpt_callout_drain(mpt, &req->callout);
 2364         }
 2365 
 2366         mpt_dma_buf_free(mpt);
 2367 }
 2368 
 2369 static int
 2370 mpt_core_unload(struct mpt_personality *pers)
 2371 {
 2372 
 2373         /* Unload is always successful. */
 2374         return (0);
 2375 }
 2376 
 2377 #define FW_UPLOAD_REQ_SIZE                              \
 2378         (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)  \
 2379        + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
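       /*
        * I.e. the fixed FW_UPLOAD header with its trailing SGE_MPI_UNION
        * replaced by one transaction-context element plus one simple
        * 32-bit SGE, matching the layout built in mpt_upload_fw() below.
        */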
 2380 
 2381 static int
 2382 mpt_upload_fw(struct mpt_softc *mpt)
 2383 {
 2384         uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
 2385         MSG_FW_UPLOAD_REPLY fw_reply;
 2386         MSG_FW_UPLOAD *fw_req;
 2387         FW_UPLOAD_TCSGE *tsge;
 2388         SGE_SIMPLE32 *sge;
 2389         uint32_t flags;
 2390         int error;
 2391 
 2392         memset(&fw_req_buf, 0, sizeof(fw_req_buf));
 2393         fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
 2394         fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
 2395         fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
 2396         fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 2397         tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
 2398         tsge->DetailsLength = 12;
 2399         tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
 2400         tsge->ImageSize = htole32(mpt->fw_image_size);
 2401         sge = (SGE_SIMPLE32 *)(tsge + 1);
 2402         flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
 2403               | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
 2404               | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
 2405         flags <<= MPI_SGE_FLAGS_SHIFT;
 2406         sge->FlagsLength = htole32(flags | mpt->fw_image_size);
 2407         sge->Address = htole32(mpt->fw_phys);
 2408         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
 2409         error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
 2410         if (error)
 2411                 return(error);
 2412         error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
 2413         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
 2414         return (error);
 2415 }
 2416 
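       /*
        * The copy pulled into host memory here is what mpt_download_fw()
        * below pushes back whenever a hard reset wipes the IOC's
        * RAM-resident image.
        */
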
 2417 static void
 2418 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
 2419                uint32_t *data, bus_size_t len)
 2420 {
 2421         uint32_t *data_end;
 2422 
 2423         data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
 2424         if (mpt->is_sas) {
 2425                 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
 2426         }
 2427         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
 2428         while (data != data_end) {
 2429                 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
 2430                 data++;
 2431         }
 2432         if (mpt->is_sas) {
 2433                 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
 2434         }
 2435 }
 2436 
 2437 static int
 2438 mpt_download_fw(struct mpt_softc *mpt)
 2439 {
 2440         MpiFwHeader_t *fw_hdr;
 2441         int error;
 2442         uint32_t ext_offset;
 2443         uint32_t data;
 2444 
 2445         if (mpt->pci_pio_reg == NULL) {
 2446                 mpt_prt(mpt, "No PIO resource!\n");
 2447                 return (ENXIO);
 2448         }
 2449 
 2450         mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
 2451                 mpt->fw_image_size);
 2452 
 2453         error = mpt_enable_diag_mode(mpt);
 2454         if (error != 0) {
 2455                 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
 2456                 return (EIO);
 2457         }
 2458 
 2459         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
 2460                   MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
 2461 
 2462         fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
 2463         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
 2464         mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
 2465                        fw_hdr->ImageSize);
 2466         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);
 2467 
 2468         ext_offset = fw_hdr->NextImageHeaderOffset;
 2469         while (ext_offset != 0) {
 2470                 MpiExtImageHeader_t *ext;
 2471 
 2472                 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
 2473                 ext_offset = ext->NextImageHeaderOffset;
 2474                 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
 2475                     BUS_DMASYNC_PREWRITE);
 2476                 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
 2477                                ext->ImageSize);
 2478                 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
 2479                     BUS_DMASYNC_POSTWRITE);
 2480         }
 2481 
 2482         if (mpt->is_sas) {
 2483                 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
 2484         }
 2485         /* Setup the address to jump to on reset. */
 2486         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
 2487         mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
 2488 
 2489         /*
 2490          * The controller sets the "flash bad" status after attempting
 2491          * to auto-boot from flash.  Clear the status so that the controller
 2492          * will continue the boot process with our newly installed firmware.
 2493          */
 2494         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
 2495         data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
 2496         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
 2497         mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
 2498 
 2499         if (mpt->is_sas) {
 2500                 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
 2501         }
 2502 
 2503         /*
 2504          * Re-enable the processor and clear the boot halt flag.
 2505          */
 2506         data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 2507         data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
 2508         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
 2509 
 2510         mpt_disable_diag_mode(mpt);
 2511         return (0);
 2512 }
 2513 
 2514 static int
 2515 mpt_dma_buf_alloc(struct mpt_softc *mpt)
 2516 {
 2517         struct mpt_map_info mi;
 2518         uint8_t *vptr;
 2519         uint32_t pptr, end;
 2520         int i, error;
 2521 
 2522         /* Create a child tag for data buffers */
 2523         if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
 2524             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 2525             NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
 2526             mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
 2527             &mpt->buffer_dmat) != 0) {
 2528                 mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
 2529                 return (1);
 2530         }
 2531 
 2532         /* Create a child tag for request buffers */
 2533         if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
 2534             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 2535             NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
 2536             &mpt->request_dmat) != 0) {
 2537                 mpt_prt(mpt, "cannot create a dma tag for requests\n");
 2538                 return (1);
 2539         }
 2540 
 2541         /* Allocate some DMA accessible memory for requests */
 2542         if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
 2543             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
 2544                 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
 2545                     MPT_REQ_MEM_SIZE(mpt));
 2546                 return (1);
 2547         }
 2548 
 2549         mi.mpt = mpt;
 2550         mi.error = 0;
 2551 
 2552         /* Load and lock it into "bus space" */
 2553         bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
 2554             MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
 2555 
 2556         if (mi.error) {
 2557                 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
 2558                     mi.error);
 2559                 return (1);
 2560         }
 2561         mpt->request_phys = mi.phys;
 2562 
 2563         /*
 2564          * Now create per-request dma maps
 2565          */
 2566         i = 0;
  2567         pptr = mpt->request_phys;
  2568         vptr = mpt->request;
 2569         end = pptr + MPT_REQ_MEM_SIZE(mpt);
  2570         while (pptr < end) {
 2571                 request_t *req = &mpt->request_pool[i];
 2572                 req->index = i++;
 2573 
 2574                 /* Store location of Request Data */
 2575                 req->req_pbuf = pptr;
 2576                 req->req_vbuf = vptr;
 2577 
 2578                 pptr += MPT_REQUEST_AREA;
 2579                 vptr += MPT_REQUEST_AREA;
 2580 
 2581                 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
 2582                 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
 2583 
 2584                 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
 2585                 if (error) {
 2586                         mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
 2587                             error);
 2588                         return (1);
 2589                 }
 2590         }
 2591 
 2592         return (0);
 2593 }
 2594 
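       /*
        * Resulting per-request carving, for illustration:
        *
        *	req_pbuf/req_vbuf -> +-------------------------------+
        *	                     | MPI request frame + chains   |
        *	sense_pbuf/vbuf ---> | sense data (MPT_SENSE_SIZE)   |
        *	                     +-------------------------------+
        *	                       (MPT_REQUEST_AREA bytes total)
        *
        * i.e. each request owns MPT_REQUEST_AREA bytes with its sense
        * buffer carved from the tail.
        */
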
 2595 static void
 2596 mpt_dma_buf_free(struct mpt_softc *mpt)
 2597 {
 2598         int i;
 2599 
 2600         if (mpt->request_dmat == 0) {
 2601                 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
 2602                 return;
 2603         }
 2604         for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
 2605                 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
 2606         }
 2607         bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
 2608         bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
 2609         bus_dma_tag_destroy(mpt->request_dmat);
 2610         mpt->request_dmat = 0;
 2611         bus_dma_tag_destroy(mpt->buffer_dmat);
 2612 }
 2613 
 2614 /*
 2615  * Allocate/Initialize data structures for the controller.  Called
 2616  * once at instance startup.
 2617  */
 2618 static int
 2619 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
 2620 {
 2621         PTR_MSG_PORT_FACTS_REPLY pfp;
 2622         int error, port, val;
 2623         size_t len;
 2624 
 2625         if (tn == MPT_MAX_TRYS) {
 2626                 return (-1);
 2627         }
 2628 
 2629         /*
 2630          * No need to reset if the IOC is already in the READY state.
 2631          *
 2632          * Force reset if initialization failed previously.
 2633          * Note that a hard_reset of the second channel of a '929
 2634          * will stop operation of the first channel.  Hopefully, if the
 2635          * first channel is ok, the second will not require a hard
 2636          * reset.
 2637          */
 2638         if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
 2639                 if (mpt_reset(mpt, FALSE) != MPT_OK) {
  2640                         return (mpt_configure_ioc(mpt, tn + 1, 1));
 2641                 }
 2642                 needreset = 0;
 2643         }
 2644 
 2645         if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
 2646                 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
  2647                 return (mpt_configure_ioc(mpt, tn + 1, 1));
 2648         }
 2649         mpt2host_iocfacts_reply(&mpt->ioc_facts);
 2650 
 2651         mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
 2652             mpt->ioc_facts.MsgVersion >> 8,
 2653             mpt->ioc_facts.MsgVersion & 0xFF,
 2654             mpt->ioc_facts.HeaderVersion >> 8,
 2655             mpt->ioc_facts.HeaderVersion & 0xFF);
 2656 
 2657         /*
 2658          * Now that we know request frame size, we can calculate
 2659          * the actual (reasonable) segment limit for read/write I/O.
 2660          *
 2661          * This limit is constrained by:
 2662          *
 2663          *  + The size of each area we allocate per command (and how
 2664          *    many chain segments we can fit into it).
 2665          *  + The total number of areas we've set up.
 2666          *  + The actual chain depth the card will allow.
 2667          *
 2668          * The first area's segment count is limited by the I/O request
 2669          * at the head of it. We cannot allocate realistically more
 2670          * than MPT_MAX_REQUESTS areas. Therefore, to account for both
 2671          * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
 2672          *
 2673          */
 2674         /* total number of request areas we (can) allocate */
 2675         mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
 2676 
 2677         /* converted to the number of chain areas possible */
 2678         mpt->max_seg_cnt *= MPT_NRFM(mpt);
 2679 
 2680         /* limited by the number of chain areas the card will support */
 2681         if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
 2682                 mpt_lprt(mpt, MPT_PRT_INFO,
 2683                     "chain depth limited to %u (from %u)\n",
 2684                     mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
 2685                 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
 2686         }
 2687 
 2688         /* converted to the number of simple sges in chain segments. */
 2689         mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
 2690 
 2691         /*
 2692          * Use this as the basis for reporting the maximum I/O size to CAM.
 2693          */
 2694         mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, btoc(maxphys) + 1);
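               /*
                * Worked example with assumed numbers: 1024 requests
                * less the 2 of slack gives 1022 areas; at, say, 3
                * chain frames per area (MPT_NRFM) that is 3066
                * possible chain areas, clamped by the IOC's
                * MaxChainDepth, then scaled by the simple SGEs each
                * chain segment holds (MPT_NSGL - 1).
                */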
 2695 
 2696         /* XXX Lame Locking! */
 2697         MPT_UNLOCK(mpt);
 2698         error = mpt_dma_buf_alloc(mpt);
 2699         MPT_LOCK(mpt);
 2700 
 2701         if (error != 0) {
 2702                 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
 2703                 return (EIO);
 2704         }
 2705 
 2706         for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 2707                 request_t *req = &mpt->request_pool[val];
 2708                 req->state = REQ_STATE_ALLOCATED;
 2709                 mpt_callout_init(mpt, &req->callout);
 2710                 mpt_free_request(mpt, req);
 2711         }
 2712 
 2713         mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
 2714                  "CAM Segment Count: %u\n", mpt->max_seg_cnt,
 2715                  mpt->max_cam_seg_cnt);
 2716 
 2717         mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
 2718             mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
 2719         mpt_lprt(mpt, MPT_PRT_INFO,
 2720             "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
 2721             "Request Frame Size %u bytes Max Chain Depth %u\n",
 2722             mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
 2723             mpt->ioc_facts.RequestFrameSize << 2,
 2724             mpt->ioc_facts.MaxChainDepth);
 2725         mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
 2726             "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
 2727             mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
 2728 
 2729         len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
 2730         mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2731         if (mpt->port_facts == NULL) {
 2732                 mpt_prt(mpt, "unable to allocate memory for port facts\n");
 2733                 return (ENOMEM);
 2734         }
 2735 
 2736         if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
 2737             (mpt->fw_uploaded == 0)) {
 2738                 struct mpt_map_info mi;
 2739 
 2740                 /*
 2741                  * In some configurations, the IOC's firmware is
 2742                  * stored in a shared piece of system NVRAM that
 2743                  * is only accessible via the BIOS.  In this
 2744                  * case, the IOC keeps a copy of the firmware in
 2745                  * RAM until the OS driver retrieves it.  Once it is
 2746                  * retrieved, we become responsible for re-downloading
 2747                  * the firmware after any hard reset.
 2748                  */
 2749                 MPT_UNLOCK(mpt);
 2750                 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
 2751                 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
 2752                     BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2753                     mpt->fw_image_size, 1, mpt->fw_image_size, 0,
 2754                     &mpt->fw_dmat);
 2755                 if (error != 0) {
 2756                         mpt_prt(mpt, "cannot create firmware dma tag\n");
 2757                         MPT_LOCK(mpt);
 2758                         return (ENOMEM);
 2759                 }
 2760                 error = bus_dmamem_alloc(mpt->fw_dmat,
 2761                     (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
 2762                     BUS_DMA_COHERENT, &mpt->fw_dmap);
 2763                 if (error != 0) {
 2764                         mpt_prt(mpt, "cannot allocate firmware memory\n");
 2765                         bus_dma_tag_destroy(mpt->fw_dmat);
 2766                         MPT_LOCK(mpt);
 2767                         return (ENOMEM);
 2768                 }
 2769                 mi.mpt = mpt;
 2770                 mi.error = 0;
 2771                 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
 2772                     mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
 2773                 mpt->fw_phys = mi.phys;
 2774 
 2775                 MPT_LOCK(mpt);
 2776                 error = mpt_upload_fw(mpt);
 2777                 if (error != 0) {
 2778                         mpt_prt(mpt, "firmware upload failed.\n");
 2779                         bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
 2780                         bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
 2781                             mpt->fw_dmap);
 2782                         bus_dma_tag_destroy(mpt->fw_dmat);
 2783                         mpt->fw_image = NULL;
 2784                         return (EIO);
 2785                 }
 2786                 mpt->fw_uploaded = 1;
 2787         }
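        /*
         * Note that the uploaded image and its DMA tag/map are
         * deliberately kept around: fw_uploaded keeps us from taking
         * this branch again, and the cached copy is what gets written
         * back to the IOC after any hard reset.
         */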
 2788 
 2789         for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
 2790                 pfp = &mpt->port_facts[port];
 2791                 error = mpt_get_portfacts(mpt, 0, pfp);
 2792                 if (error != MPT_OK) {
 2793                         mpt_prt(mpt,
 2794                             "mpt_get_portfacts on port %d failed\n", port);
 2795                         free(mpt->port_facts, M_DEVBUF);
 2796                         mpt->port_facts = NULL;
 2797                         return (mpt_configure_ioc(mpt, tn++, 1));
 2798                 }
 2799                 mpt2host_portfacts_reply(pfp);
 2800 
 2801                 if (port > 0) {         /* "error" doubles as the print level here */
 2802                         error = MPT_PRT_INFO;
 2803                 } else {
 2804                         error = MPT_PRT_DEBUG;
 2805                 }
 2806                 mpt_lprt(mpt, error,
 2807                     "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
 2808                     port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
 2809                     pfp->MaxDevices);
 2810         }
 2811 
 2812         /*
 2813          * XXX: Not yet supporting more than port 0
 2814          */
 2815         pfp = &mpt->port_facts[0];
 2816         if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
 2817                 mpt->is_fc = 1;
 2818                 mpt->is_sas = 0;
 2819                 mpt->is_spi = 0;
 2820         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
 2821                 mpt->is_fc = 0;
 2822                 mpt->is_sas = 1;
 2823                 mpt->is_spi = 0;
 2824         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
 2825                 mpt->is_fc = 0;
 2826                 mpt->is_sas = 0;
 2827                 mpt->is_spi = 1;
 2828                 if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
 2829                         mpt->mpt_ini_id = pfp->PortSCSIID;
 2830         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
 2831                 mpt_prt(mpt, "iSCSI not supported yet\n");
 2832                 return (ENXIO);
 2833         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
 2834                 mpt_prt(mpt, "Inactive Port\n");
 2835                 return (ENXIO);
 2836         } else {
 2837                 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
 2838                 return (ENXIO);
 2839         }
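        /* Exactly one of is_fc, is_sas, or is_spi is set past this point. */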
 2840 
 2841         /*
 2842          * Set our role with what this port supports.
 2843          *
 2844          * Note that individual modules may change this later
 2845          * if it differs from what they want.
 2846          */
 2847         mpt->role = MPT_ROLE_NONE;
 2848         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
 2849                 mpt->role |= MPT_ROLE_INITIATOR;
 2850         }
 2851         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
 2852                 mpt->role |= MPT_ROLE_TARGET;
 2853         }
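        /*
         * For instance, a port whose ProtocolFlags advertise both
         * protocols leaves mpt->role set to
         * (MPT_ROLE_INITIATOR | MPT_ROLE_TARGET).
         */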
 2854 
 2855         /*
 2856          * Enable the IOC
 2857          */
 2858         if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
 2859                 mpt_prt(mpt, "unable to initialize IOC\n");
 2860                 return (ENXIO);
 2861         }
 2862 
 2863         /*
 2864          * Read IOC configuration information.
 2865          *
 2866          * We need this to determine whether or not we have certain
 2867          * settings (e.g., for Integrated Mirroring).
 2868          */
 2869         mpt_read_config_info_ioc(mpt);
 2870 
 2871         return (0);
 2872 }
 2873 
 2874 static int
 2875 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
 2876 {
 2877         uint32_t pptr;
 2878         int val;
 2879 
 2880         if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
 2881                 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
 2882                 return (EIO);
 2883         }
 2884 
 2885         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
 2886 
 2887         if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
 2888                 mpt_prt(mpt, "IOC failed to go to run state\n");
 2889                 return (ENXIO);
 2890         }
 2891         mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
 2892 
 2893         /*
 2894          * Give it reply buffers
 2895          *
 2896          * Do *not* exceed global credits.
 2897          */
 2898         for (val = 0, pptr = mpt->reply_phys;
 2899             (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
 2900              pptr += MPT_REPLY_SIZE) {
 2901                 mpt_free_reply(mpt, pptr);
 2902                 if (++val == mpt->ioc_facts.GlobalCredits - 1)
 2903                         break;
 2904         }
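        /*
         * Illustration, with hypothetical sizes: if PAGE_SIZE were 4096
         * and MPT_REPLY_SIZE 256, the strict comparison above would post
         * 15 reply frames from the page, stopping earlier if the count
         * reaches GlobalCredits - 1 first.
         */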
 2905 
 2906         /*
 2907          * Enable the port if asked. This is only done if we're resetting
 2908          * the IOC after initial startup.
 2909          */
 2910         if (portenable) {
 2911                 /*
 2912                  * Enable asynchronous event reporting
 2913                  */
 2914                 mpt_send_event_request(mpt, 1);
 2915 
 2916                 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
 2917                         mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
 2918                         return (ENXIO);
 2919                 }
 2920         }
 2921         return (MPT_OK);
 2922 }
 2923 
 2924 /*
 2925  * Endian conversion functions - only used on big-endian machines.
 2926  */
 2927 #if     _BYTE_ORDER == _BIG_ENDIAN
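/*
 * A sketch of what the conversion macros are assumed to expand to (the
 * real definitions live in mpt.h): an in-place swap between the IOC's
 * little-endian wire format and host byte order, e.g.
 *
 *	#define MPT_2_HOST16(ptr, tag)	((ptr)->tag = le16toh((ptr)->tag))
 *	#define MPT_2_HOST32(ptr, tag)	((ptr)->tag = le32toh((ptr)->tag))
 *	#define HOST_2_MPT16(ptr, tag)	((ptr)->tag = htole16((ptr)->tag))
 *	#define HOST_2_MPT32(ptr, tag)	((ptr)->tag = htole32((ptr)->tag))
 *
 * On little-endian hosts the whole section is compiled out and the
 * macros are presumably no-ops.
 */
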
 2928 void
 2929 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
 2930 {
 2931 
 2932         MPT_2_HOST32(sge, FlagsLength);
 2933         MPT_2_HOST32(sge, u.Address64.Low);
 2934         MPT_2_HOST32(sge, u.Address64.High);
 2935 }
 2936 
 2937 void
 2938 mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
 2939 {
 2940 
 2941         MPT_2_HOST16(rp, MsgVersion);
 2942         MPT_2_HOST16(rp, HeaderVersion);
 2943         MPT_2_HOST32(rp, MsgContext);
 2944         MPT_2_HOST16(rp, IOCExceptions);
 2945         MPT_2_HOST16(rp, IOCStatus);
 2946         MPT_2_HOST32(rp, IOCLogInfo);
 2947         MPT_2_HOST16(rp, ReplyQueueDepth);
 2948         MPT_2_HOST16(rp, RequestFrameSize);
 2949         MPT_2_HOST16(rp, Reserved_0101_FWVersion);
 2950         MPT_2_HOST16(rp, ProductID);
 2951         MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
 2952         MPT_2_HOST16(rp, GlobalCredits);
 2953         MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
 2954         MPT_2_HOST16(rp, CurReplyFrameSize);
 2955         MPT_2_HOST32(rp, FWImageSize);
 2956         MPT_2_HOST32(rp, IOCCapabilities);
 2957         MPT_2_HOST32(rp, FWVersion.Word);
 2958         MPT_2_HOST16(rp, HighPriorityQueueDepth);
 2959         MPT_2_HOST16(rp, Reserved2);
 2960         mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
 2961         MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
 2962 }
 2963 
 2964 void
 2965 mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
 2966 {
 2967 
 2968         MPT_2_HOST16(pfp, Reserved);
 2969         MPT_2_HOST16(pfp, Reserved1);
 2970         MPT_2_HOST32(pfp, MsgContext);
 2971         MPT_2_HOST16(pfp, Reserved2);
 2972         MPT_2_HOST16(pfp, IOCStatus);
 2973         MPT_2_HOST32(pfp, IOCLogInfo);
 2974         MPT_2_HOST16(pfp, MaxDevices);
 2975         MPT_2_HOST16(pfp, PortSCSIID);
 2976         MPT_2_HOST16(pfp, ProtocolFlags);
 2977         MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
 2978         MPT_2_HOST16(pfp, MaxPersistentIDs);
 2979         MPT_2_HOST16(pfp, MaxLanBuckets);
 2980         MPT_2_HOST16(pfp, Reserved4);
 2981         MPT_2_HOST32(pfp, Reserved5);
 2982 }
 2983 
 2984 void
 2985 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
 2986 {
 2987         int i;
 2988 
 2989         MPT_2_HOST32(ioc2, CapabilitiesFlags);
 2990         for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
 2991                 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
 2992         }
 2993 }
 2994 
 2995 void
 2996 mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
 2997 {
 2998 
 2999         MPT_2_HOST16(ioc3, Reserved2);
 3000 }
 3001 
 3002 void
 3003 mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
 3004 {
 3005 
 3006         MPT_2_HOST32(sp0, Capabilities);
 3007         MPT_2_HOST32(sp0, PhysicalInterface);
 3008 }
 3009 
 3010 void
 3011 mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
 3012 {
 3013 
 3014         MPT_2_HOST32(sp1, Configuration);
 3015         MPT_2_HOST32(sp1, OnBusTimerValue);
 3016         MPT_2_HOST16(sp1, IDConfig);
 3017 }
 3018 
 3019 void
 3020 host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
 3021 {
 3022 
 3023         HOST_2_MPT32(sp1, Configuration);
 3024         HOST_2_MPT32(sp1, OnBusTimerValue);
 3025         HOST_2_MPT16(sp1, IDConfig);
 3026 }
 3027 
 3028 void
 3029 mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
 3030 {
 3031         int i;
 3032 
 3033         MPT_2_HOST32(sp2, PortFlags);
 3034         MPT_2_HOST32(sp2, PortSettings);
 3035         for (i = 0; i < sizeof(sp2->DeviceSettings) /
 3036             sizeof(*sp2->DeviceSettings); i++) {
 3037                 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
 3038         }
 3039 }
 3040 
 3041 void
 3042 mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
 3043 {
 3044 
 3045         MPT_2_HOST32(sd0, NegotiatedParameters);
 3046         MPT_2_HOST32(sd0, Information);
 3047 }
 3048 
 3049 void
 3050 mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
 3051 {
 3052 
 3053         MPT_2_HOST32(sd1, RequestedParameters);
 3054         MPT_2_HOST32(sd1, Reserved);
 3055         MPT_2_HOST32(sd1, Configuration);
 3056 }
 3057 
 3058 void
 3059 host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
 3060 {
 3061 
 3062         HOST_2_MPT32(sd1, RequestedParameters);
 3063         HOST_2_MPT32(sd1, Reserved);
 3064         HOST_2_MPT32(sd1, Configuration);
 3065 }
 3066 
 3067 void
 3068 mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
 3069 {
 3070 
 3071         MPT_2_HOST32(fp0, Flags);
 3072         MPT_2_HOST32(fp0, PortIdentifier);
 3073         MPT_2_HOST32(fp0, WWNN.Low);
 3074         MPT_2_HOST32(fp0, WWNN.High);
 3075         MPT_2_HOST32(fp0, WWPN.Low);
 3076         MPT_2_HOST32(fp0, WWPN.High);
 3077         MPT_2_HOST32(fp0, SupportedServiceClass);
 3078         MPT_2_HOST32(fp0, SupportedSpeeds);
 3079         MPT_2_HOST32(fp0, CurrentSpeed);
 3080         MPT_2_HOST32(fp0, MaxFrameSize);
 3081         MPT_2_HOST32(fp0, FabricWWNN.Low);
 3082         MPT_2_HOST32(fp0, FabricWWNN.High);
 3083         MPT_2_HOST32(fp0, FabricWWPN.Low);
 3084         MPT_2_HOST32(fp0, FabricWWPN.High);
 3085         MPT_2_HOST32(fp0, DiscoveredPortsCount);
 3086         MPT_2_HOST32(fp0, MaxInitiators);
 3087 }
 3088 
 3089 void
 3090 mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
 3091 {
 3092 
 3093         MPT_2_HOST32(fp1, Flags);
 3094         MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
 3095         MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
 3096         MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
 3097         MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
 3098 }
 3099 
 3100 void
 3101 host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
 3102 {
 3103 
 3104         HOST_2_MPT32(fp1, Flags);
 3105         HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
 3106         HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
 3107         HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
 3108         HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
 3109 }
 3110 
 3111 void
 3112 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
 3113 {
 3114         int i;
 3115 
 3116         MPT_2_HOST16(volp, VolumeStatus.Reserved);
 3117         MPT_2_HOST16(volp, VolumeSettings.Settings);
 3118         MPT_2_HOST32(volp, MaxLBA);
 3119         MPT_2_HOST32(volp, MaxLBAHigh);
 3120         MPT_2_HOST32(volp, StripeSize);
 3121         MPT_2_HOST32(volp, Reserved2);
 3122         MPT_2_HOST32(volp, Reserved3);
 3123         for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
 3124                 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
 3125         }
 3126 }
 3127 
 3128 void
 3129 mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
 3130 {
 3131 
 3132         MPT_2_HOST32(rpd0, Reserved1);
 3133         MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
 3134         MPT_2_HOST32(rpd0, MaxLBA);
 3135         MPT_2_HOST16(rpd0, ErrorData.Reserved);
 3136         MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
 3137         MPT_2_HOST16(rpd0, ErrorData.SmartCount);
 3138 }
 3139 
 3140 void
 3141 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
 3142 {
 3143 
 3144         MPT_2_HOST32(vi, TotalBlocks.High);     /* U64 halves are 32 bits */
 3145         MPT_2_HOST32(vi, TotalBlocks.Low);
 3146         MPT_2_HOST32(vi, BlocksRemaining.High);
 3147         MPT_2_HOST32(vi, BlocksRemaining.Low);
 3148 }
 3149 #endif
