The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/mpt/mpt.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Generic routines for LSI Fusion adapters.
    3  * FreeBSD Version.
    4  *
    5  * Copyright (c) 2000, 2001 by Greg Ansley
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice immediately at the beginning of the file, without modification,
   12  *    this list of conditions, and the following disclaimer.
   13  * 2. The name of the author may not be used to endorse or promote products
   14  *    derived from this software without specific prior written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 /*-
   29  * Copyright (c) 2002, 2006 by Matthew Jacob
   30  * All rights reserved.
   31  *
   32  * Redistribution and use in source and binary forms, with or without
   33  * modification, are permitted provided that the following conditions are
   34  * met:
   35  * 1. Redistributions of source code must retain the above copyright
   36  *    notice, this list of conditions and the following disclaimer.
   37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   38  *    substantially similar to the "NO WARRANTY" disclaimer below
   39  *    ("Disclaimer") and any redistribution must be conditioned upon including
   40  *    a substantially similar Disclaimer requirement for further binary
   41  *    redistribution.
   42  * 3. Neither the names of the above listed copyright holders nor the names
   43  *    of any contributors may be used to endorse or promote products derived
   44  *    from this software without specific prior written permission.
   45  *
   46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   57  *
   58  * Support from Chris Ellsworth in order to make SAS adapters work
   59  * is gratefully acknowledged.
   60  *
   61  *
   62  * Support from LSI-Logic has also gone a great deal toward making this a
   63  * workable subsystem and is gratefully acknowledged.
   64  */
   65 /*-
   66  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
   67  * Copyright (c) 2005, WHEEL Sp. z o.o.
   68  * Copyright (c) 2004, 2005 Justin T. Gibbs
   69  * All rights reserved.
   70  *
   71  * Redistribution and use in source and binary forms, with or without
   72  * modification, are permitted provided that the following conditions are
   73  * met:
   74  * 1. Redistributions of source code must retain the above copyright
   75  *    notice, this list of conditions and the following disclaimer.
   76  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   77  *    substantially similar to the "NO WARRANTY" disclaimer below
   78  *    ("Disclaimer") and any redistribution must be conditioned upon including
   79  *    a substantially similar Disclaimer requirement for further binary
   80  *    redistribution.
   81  * 3. Neither the names of the above listed copyright holders nor the names
   82  *    of any contributors may be used to endorse or promote products derived
   83  *    from this software without specific prior written permission.
   84  *
   85  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   86  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   87  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   88  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   89  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   90  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   91  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   92  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   93  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   94  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   95  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   96  */
   97 
   98 #include <sys/cdefs.h>
   99 __FBSDID("$FreeBSD: releng/8.4/sys/dev/mpt/mpt.c 245984 2013-01-27 17:13:18Z marius $");
  100 
  101 #include <dev/mpt/mpt.h>
  102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
  103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
  104 
  105 #include <dev/mpt/mpilib/mpi.h>
  106 #include <dev/mpt/mpilib/mpi_ioc.h>
  107 #include <dev/mpt/mpilib/mpi_fc.h>
  108 #include <dev/mpt/mpilib/mpi_targ.h>
  109 
  110 #include <sys/sysctl.h>
  111 
  112 #define MPT_MAX_TRYS 3
  113 #define MPT_MAX_WAIT 300000
  114 
  115 static int maxwait_ack = 0;
  116 static int maxwait_int = 0;
  117 static int maxwait_state = 0;
  118 
  119 static TAILQ_HEAD(, mpt_softc)  mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
  120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
  121 
  122 static mpt_reply_handler_t mpt_default_reply_handler;
  123 static mpt_reply_handler_t mpt_config_reply_handler;
  124 static mpt_reply_handler_t mpt_handshake_reply_handler;
  125 static mpt_reply_handler_t mpt_event_reply_handler;
  126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  127                                MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
  128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
  129 static int mpt_soft_reset(struct mpt_softc *mpt);
  130 static void mpt_hard_reset(struct mpt_softc *mpt);
  131 static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
  132 static void mpt_dma_buf_free(struct mpt_softc *mpt);
  133 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
  134 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
  135 
  136 /************************* Personality Module Support *************************/
  137 /*
  138  * We include one extra entry that is guaranteed to be NULL
  139  * to simplify our itterator.
  140  */
  141 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
  142 static __inline struct mpt_personality*
  143         mpt_pers_find(struct mpt_softc *, u_int);
  144 static __inline struct mpt_personality*
  145         mpt_pers_find_reverse(struct mpt_softc *, u_int);
  146 
  147 static __inline struct mpt_personality *
  148 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
  149 {
  150         KASSERT(start_at <= MPT_MAX_PERSONALITIES,
  151                 ("mpt_pers_find: starting position out of range"));
  152 
  153         while (start_at < MPT_MAX_PERSONALITIES
  154             && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
  155                 start_at++;
  156         }
  157         return (mpt_personalities[start_at]);
  158 }
  159 
  160 /*
  161  * Used infrequently, so no need to optimize like a forward
  162  * traversal where we use the MAX+1 is guaranteed to be NULL
  163  * trick.
  164  */
/*
 * Return the first attached personality at or below start_at,
 * or NULL if none remains.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	/*
	 * NOTE: start_at is unsigned; decrementing past zero wraps to a
	 * huge value that fails the < MPT_MAX_PERSONALITIES test.  That
	 * wrap is the intended loop-termination mechanism, so the test
	 * below doubles as both the bounds check and the "walked off the
	 * front of the table" check.
	 */
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}
  176 
/*
 * Iterate over all attached personalities, lowest id first.  Relies on
 * mpt_pers_find() returning the NULL sentinel once the table is exhausted.
 */
#define MPT_PERS_FOREACH(mpt, pers)                             \
        for (pers = mpt_pers_find(mpt, /*start_at*/0);          \
             pers != NULL;                                      \
             pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

/*
 * Iterate over all attached personalities, highest id first (used for
 * teardown so detach order mirrors attach order in reverse).
 */
#define MPT_PERS_FOREACH_REVERSE(mpt, pers)                             \
        for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
             pers != NULL;                                              \
             pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
  186 
  187 static mpt_load_handler_t      mpt_stdload;
  188 static mpt_probe_handler_t     mpt_stdprobe;
  189 static mpt_attach_handler_t    mpt_stdattach;
  190 static mpt_enable_handler_t    mpt_stdenable;
  191 static mpt_ready_handler_t     mpt_stdready;
  192 static mpt_event_handler_t     mpt_stdevent;
  193 static mpt_reset_handler_t     mpt_stdreset;
  194 static mpt_shutdown_handler_t  mpt_stdshutdown;
  195 static mpt_detach_handler_t    mpt_stddetach;
  196 static mpt_unload_handler_t    mpt_stdunload;
/*
 * The default personality supplies a noop implementation for every
 * handler slot; mpt_modevent() copies these over any NULL entries in a
 * newly registered personality so callers never need NULL checks.
 */
static struct mpt_personality mpt_default_personality =
{
        .load           = mpt_stdload,
        .probe          = mpt_stdprobe,
        .attach         = mpt_stdattach,
        .enable         = mpt_stdenable,
        .ready          = mpt_stdready,
        .event          = mpt_stdevent,
        .reset          = mpt_stdreset,
        .shutdown       = mpt_stdshutdown,
        .detach         = mpt_stddetach,
        .unload         = mpt_stdunload
};
  210 
  211 static mpt_load_handler_t      mpt_core_load;
  212 static mpt_attach_handler_t    mpt_core_attach;
  213 static mpt_enable_handler_t    mpt_core_enable;
  214 static mpt_reset_handler_t     mpt_core_ioc_reset;
  215 static mpt_event_handler_t     mpt_core_event;
  216 static mpt_shutdown_handler_t  mpt_core_shutdown;
  217 static mpt_shutdown_handler_t  mpt_core_detach;
  218 static mpt_unload_handler_t    mpt_core_unload;
/*
 * Core personality: the always-present base driver layer.  Slots left
 * unset (and the commented-out attach/enable entries) fall back to the
 * noop defaults installed by mpt_modevent().
 */
static struct mpt_personality mpt_core_personality =
{
        .name           = "mpt_core",
        .load           = mpt_core_load,
//      .attach         = mpt_core_attach,
//      .enable         = mpt_core_enable,
        .event          = mpt_core_event,
        .reset          = mpt_core_ioc_reset,
        .shutdown       = mpt_core_shutdown,
        .detach         = mpt_core_detach,
        .unload         = mpt_core_unload,
};
  231 
  232 /*
  233  * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
  234  * ordering information.  We want the core to always register FIRST.
  235  * other modules are set to SI_ORDER_SECOND.
  236  */
  237 static moduledata_t mpt_core_mod = {
  238         "mpt_core", mpt_modevent, &mpt_core_personality
  239 };
  240 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
  241 MODULE_VERSION(mpt_core, 1);
  242 
  243 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
  244 
  245 int
  246 mpt_modevent(module_t mod, int type, void *data)
  247 {
  248         struct mpt_personality *pers;
  249         int error;
  250 
  251         pers = (struct mpt_personality *)data;
  252 
  253         error = 0;
  254         switch (type) {
  255         case MOD_LOAD:
  256         {
  257                 mpt_load_handler_t **def_handler;
  258                 mpt_load_handler_t **pers_handler;
  259                 int i;
  260 
  261                 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
  262                         if (mpt_personalities[i] == NULL)
  263                                 break;
  264                 }
  265                 if (i >= MPT_MAX_PERSONALITIES) {
  266                         error = ENOMEM;
  267                         break;
  268                 }
  269                 pers->id = i;
  270                 mpt_personalities[i] = pers;
  271 
  272                 /* Install standard/noop handlers for any NULL entries. */
  273                 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
  274                 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
  275                 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
  276                         if (*pers_handler == NULL)
  277                                 *pers_handler = *def_handler;
  278                         pers_handler++;
  279                         def_handler++;
  280                 }
  281                 
  282                 error = (pers->load(pers));
  283                 if (error != 0)
  284                         mpt_personalities[i] = NULL;
  285                 break;
  286         }
  287         case MOD_SHUTDOWN:
  288                 break;
  289         case MOD_QUIESCE:
  290                 break;
  291         case MOD_UNLOAD:
  292                 error = pers->unload(pers);
  293                 mpt_personalities[pers->id] = NULL;
  294                 break;
  295         default:
  296                 error = EINVAL;
  297                 break;
  298         }
  299         return (error);
  300 }
  301 
/* Default load handler: nothing to set up, load always succeeds. */
static int
mpt_stdload(struct mpt_personality *pers)
{

	/* Load is always successful. */
	return (0);
}
  309 
/* Default probe handler: claims nothing special, probe always succeeds. */
static int
mpt_stdprobe(struct mpt_softc *mpt)
{

	/* Probe is always successful. */
	return (0);
}
  317 
/* Default attach handler: no per-personality attach work needed. */
static int
mpt_stdattach(struct mpt_softc *mpt)
{

	/* Attach is always successful. */
	return (0);
}
  325 
/* Default enable handler: no per-personality enable work needed. */
static int
mpt_stdenable(struct mpt_softc *mpt)
{

	/* Enable is always successful. */
	return (0);
}
  333 
/* Default ready handler: noop, invoked from mpt_postattach(). */
static void
mpt_stdready(struct mpt_softc *mpt)
{

}
  339 
/*
 * Default event handler: logs the event code and reports it unhandled
 * (return 0) so other personalities may claim it.
 */
static int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}
  348 
/* Default reset handler: noop. */
static void
mpt_stdreset(struct mpt_softc *mpt, int type)
{

}
  354 
/* Default shutdown handler: noop. */
static void
mpt_stdshutdown(struct mpt_softc *mpt)
{

}
  360 
/* Default detach handler: noop. */
static void
mpt_stddetach(struct mpt_softc *mpt)
{

}
  366 
/* Default unload handler: nothing to tear down, unload always succeeds. */
static int
mpt_stdunload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}
  374 
  375 /*
  376  * Post driver attachment, we may want to perform some global actions.
  377  * Here is the hook to do so.
  378  */
  379 
/*
 * Run once at SI_SUB_CONFIGURE time, after all adapters have attached:
 * give every personality of every controller its ready() callback.
 */
static void
mpt_postattach(void *unused)
{
	struct mpt_softc *mpt;
	struct mpt_personality *pers;

	TAILQ_FOREACH(mpt, &mpt_tailq, links) {
		MPT_PERS_FOREACH(mpt, pers)
			pers->ready(mpt);
	}
}
SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
  392 
  393 /******************************* Bus DMA Support ******************************/
  394 void
  395 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  396 {
  397         struct mpt_map_info *map_info;
  398 
  399         map_info = (struct mpt_map_info *)arg;
  400         map_info->error = error;
  401         map_info->phys = segs->ds_addr;
  402 }
  403 
  404 /**************************** Reply/Event Handling ****************************/
/*
 * Register a callback of the given type and return its handler id via
 * *phandler_id.  Only MPT_HANDLER_REPLY is currently supported.
 * Registering the same reply handler twice returns the original id.
 *
 * Returns 0 on success, EINVAL for a NULL phandler_id or unknown type,
 * ENOMEM when the reply handler table is full.
 */
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
                     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			/* Table exhausted: no default-handler slot left. */
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
  458 
  459 int
  460 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
  461                        mpt_handler_t handler, uint32_t handler_id)
  462 {
  463 
  464         switch (type) {
  465         case MPT_HANDLER_REPLY:
  466         {
  467                 u_int cbi;
  468 
  469                 cbi = MPT_CBI(handler_id);
  470                 if (cbi >= MPT_NUM_REPLY_HANDLERS
  471                  || mpt_reply_handlers[cbi] != handler.reply_handler)
  472                         return (ENOENT);
  473                 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
  474                 break;
  475         }
  476         default:
  477                 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
  478                 return (EINVAL);
  479         }
  480         return (0);
  481 }
  482 
/*
 * Catch-all reply handler installed in every unused callback slot.
 * Logs and dumps the unexpected reply, then tells the caller to free
 * the reply frame (returns TRUE).
 */
static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}
  499 
/*
 * Completion handler for configuration page requests.  Copies the page
 * header and IOC status from the reply back into the original request
 * buffer, marks the request done, and either wakes the sleeping
 * requester or frees a request that already timed out.  Always returns
 * TRUE so the reply frame is released.
 */
static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			/* IOCStatus arrives little-endian from the IOC. */
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			      sizeof(cfgp->Header));
			cfgp->ExtPageLength = reply->ExtPageLength;
			cfgp->ExtPageType = reply->ExtPageType;
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}
  533 
/*
 * Handshake replies are consumed synchronously by the handshake code
 * itself, so this handler only signals that the frame may be freed.
 */
static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	/* Nothing to be done. */
	return (TRUE);
}
  542 
/*
 * Dispatch an asynchronous event reply to every attached personality.
 * Byte-swaps the event fields in place, offers the event to each
 * personality's event hook, and, when the IOC demands an ACK, either
 * sends one immediately or (if no request is free) parks the reply
 * frame on mpt->ack_frames for later acknowledgement.
 *
 * Returns TRUE when the caller may free the reply frame, FALSE when the
 * frame has been queued for a deferred ACK.
 */
static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		/* Convert the wire (little-endian) fields in place. */
		msg->EventDataLength = le16toh(msg->EventDataLength);
		msg->IOCStatus = le16toh(msg->IOCStatus);
		msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
		msg->Event = le32toh(msg->Event);
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		/*
		 * "%sequired" below deliberately composes "required" /
		 * "not required" from the "r"/"not r" argument.
		 */
		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt,
				msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = req->index | MPT_REPLY_HANDLER_EVENTS;
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				/*
				 * No request available: stash the frame on
				 * the ack list and keep it (free_reply is
				 * FALSE) until a request frees up.
				 */
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
			reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * then occur using the same context. As you might imagine, this
	 * leads to Very Bad Thing.
	 *
	 *  Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
#if	0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}
  653 
  654 /*
  655  * Process an asynchronous event from the IOC.
  656  */
  657 static int
  658 mpt_core_event(struct mpt_softc *mpt, request_t *req,
  659                MSG_EVENT_NOTIFY_REPLY *msg)
  660 {
  661 
  662         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
  663                  msg->Event & 0xFF);
  664         switch(msg->Event & 0xFF) {
  665         case MPI_EVENT_NONE:
  666                 break;
  667         case MPI_EVENT_LOG_DATA:
  668         {
  669                 int i;
  670 
  671                 /* Some error occurred that LSI wants logged */
  672                 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
  673                         msg->IOCLogInfo);
  674                 mpt_prt(mpt, "\tEvtLogData: Event Data:");
  675                 for (i = 0; i < msg->EventDataLength; i++)
  676                         mpt_prtc(mpt, "  %08x", msg->Data[i]);
  677                 mpt_prtc(mpt, "\n");
  678                 break;
  679         }
  680         case MPI_EVENT_EVENT_CHANGE:
  681                 /*
  682                  * This is just an acknowledgement
  683                  * of our mpt_send_event_request.
  684                  */
  685                 break;
  686         case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
  687                 break;
  688         default:
  689                 return (0);
  690                 break;
  691         }
  692         return (1);
  693 }
  694 
  695 static void
  696 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  697                    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
  698 {
  699         MSG_EVENT_ACK *ackp;
  700 
  701         ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
  702         memset(ackp, 0, sizeof (*ackp));
  703         ackp->Function = MPI_FUNCTION_EVENT_ACK;
  704         ackp->Event = htole32(msg->Event);
  705         ackp->EventContext = htole32(msg->EventContext);
  706         ackp->MsgContext = htole32(context);
  707         mpt_check_doorbell(mpt);
  708         mpt_send_cmd(mpt, ack_req);
  709 }
  710 
  711 /***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine: drain the controller's reply queue.
 *
 * Each reply descriptor is either an "address reply" (A-bit set; the
 * descriptor carries the bus address of a reply frame in host memory)
 * or a "context reply" (the descriptor itself encodes the completion
 * context).  In both cases the context value is decomposed into a
 * callback index and a request index, and the registered reply handler
 * for that callback index is invoked.
 *
 * Called with the mpt lock held.  The loop is bounded (~1000 trips) so
 * a wedged or storming reply queue cannot hang the system.
 */
void
mpt_intr(void *arg)
{
        struct mpt_softc *mpt;
        uint32_t reply_desc;
        int ntrips = 0;

        mpt = (struct mpt_softc *)arg;
        mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
        MPT_LOCK_ASSERT(mpt);

        while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
                request_t         *req;
                MSG_DEFAULT_REPLY *reply_frame;
                uint32_t           reply_baddr;
                uint32_t           ctxt_idx;
                u_int              cb_index;
                u_int              req_index;
                u_int              offset;
                int                free_rf;

                req = NULL;
                reply_frame = NULL;
                reply_baddr = 0;
                offset = 0;
                if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
                        /*
                         * Address reply: descriptor points at a reply
                         * frame.  Ensure that the reply frame is coherent
                         * before reading its MsgContext.
                         */
                        reply_baddr = MPT_REPLY_BADDR(reply_desc);
                        offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
                        bus_dmamap_sync_range(mpt->reply_dmat,
                            mpt->reply_dmap, offset, MPT_REPLY_SIZE,
                            BUS_DMASYNC_POSTREAD);
                        reply_frame = MPT_REPLY_OTOV(mpt, offset);
                        ctxt_idx = le32toh(reply_frame->MsgContext);
                } else {
                        uint32_t type;

                        type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
                        ctxt_idx = reply_desc;
                        mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
                                    reply_desc);

                        switch (type) {
                        case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
                                ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
                                break;
                        case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
                                /*
                                 * Target-mode completion: context is an IO
                                 * index into tgt_cmd_ptrs.  Validate it;
                                 * on inconsistency drop the reply (setting
                                 * reply_desc to MPT_REPLY_EMPTY) and, for
                                 * the corrupt cases, force loop exit by
                                 * saturating ntrips.
                                 */
                                ctxt_idx = GET_IO_INDEX(reply_desc);
                                if (mpt->tgt_cmd_ptrs == NULL) {
                                        mpt_prt(mpt,
                                            "mpt_intr: no target cmd ptrs\n");
                                        reply_desc = MPT_REPLY_EMPTY;
                                        break;
                                }
                                if (ctxt_idx >= mpt->tgt_cmds_allocated) {
                                        mpt_prt(mpt,
                                            "mpt_intr: bad tgt cmd ctxt %u\n",
                                            ctxt_idx);
                                        reply_desc = MPT_REPLY_EMPTY;
                                        ntrips = 1000;
                                        break;
                                }
                                req = mpt->tgt_cmd_ptrs[ctxt_idx];
                                if (req == NULL) {
                                        mpt_prt(mpt, "no request backpointer "
                                            "at index %u", ctxt_idx);
                                        reply_desc = MPT_REPLY_EMPTY;
                                        ntrips = 1000;
                                        break;
                                }
                                /*
                                 * Reformulate ctxt_idx to be just as if
                                 * it were another type of context reply
                                 * so the code below will find the request
                                 * via indexing into the pool.
                                 */
                                ctxt_idx =
                                    req->index | mpt->scsi_tgt_handler_id;
                                req = NULL;
                                break;
                        case MPI_CONTEXT_REPLY_TYPE_LAN:
                                /* LAN replies are not handled by this driver. */
                                mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
                                    reply_desc);
                                reply_desc = MPT_REPLY_EMPTY;
                                break;
                        default:
                                mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
                                reply_desc = MPT_REPLY_EMPTY;
                                break;
                        }
                        if (reply_desc == MPT_REPLY_EMPTY) {
                                /* Reply was discarded; bail after too many. */
                                if (ntrips++ > 1000) {
                                        break;
                                }
                                continue;
                        }
                }

                /* Split the context into handler index and request index. */
                cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
                req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
                if (req_index < MPT_MAX_REQUESTS(mpt)) {
                        req = &mpt->request_pool[req_index];
                } else {
                        mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
                            " 0x%x)\n", req_index, reply_desc);
                }

                /* Make request memory coherent before the handler reads it. */
                bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                free_rf = mpt_reply_handlers[cb_index](mpt, req,
                    reply_desc, reply_frame);

                /*
                 * Return the reply frame to the IOC's free pool unless the
                 * handler kept it (free_rf == 0).
                 */
                if (reply_frame != NULL && free_rf) {
                        bus_dmamap_sync_range(mpt->reply_dmat,
                            mpt->reply_dmap, offset, MPT_REPLY_SIZE,
                            BUS_DMASYNC_PREREAD);
                        mpt_free_reply(mpt, reply_baddr);
                }

                /*
                 * If we got ourselves disabled, don't get stuck in a loop
                 */
                if (mpt->disabled) {
                        mpt_disable_ints(mpt);
                        break;
                }
                if (ntrips++ > 1000) {
                        break;
                }
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}
  846 
  847 /******************************* Error Recovery *******************************/
/*
 * Error recovery: complete every request on 'chain' by fabricating a
 * reply frame carrying 'iocstatus' and handing it to each request's
 * registered reply handler, as though the IOC itself had returned it.
 * Used to flush requests that will never complete normally (e.g. after
 * an IOC reset).
 */
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
                            u_int iocstatus)
{
        MSG_DEFAULT_REPLY  ioc_status_frame;
        request_t         *req;

        memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
        ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
        ioc_status_frame.IOCStatus = iocstatus;
        while((req = TAILQ_FIRST(chain)) != NULL) {
                MSG_REQUEST_HEADER *msg_hdr;
                u_int               cb_index;

                /*
                 * Sync the request buffer so the fake reply can echo the
                 * original request's Function and MsgContext.
                 */
                bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
                ioc_status_frame.Function = msg_hdr->Function;
                ioc_status_frame.MsgContext = msg_hdr->MsgContext;
                cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
                mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
                    &ioc_status_frame);
                /*
                 * Handlers normally dequeue the request themselves;
                 * remove it here only if the handler left it queued,
                 * so the loop is guaranteed to make progress.
                 */
                if (mpt_req_on_pending_list(mpt, req) != 0)
                        TAILQ_REMOVE(chain, req, links);
        }
}
  874 
  875 /********************************* Diagnostics ********************************/
  876 /*
  877  * Perform a diagnostic dump of a reply frame.
  878  */
  879 void
  880 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
  881 {
  882 
  883         mpt_prt(mpt, "Address Reply:\n");
  884         mpt_print_reply(reply_frame);
  885 }
  886 
  887 /******************************* Doorbell Access ******************************/
  888 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
  889 static __inline  uint32_t mpt_rd_intr(struct mpt_softc *mpt);
  890 
/* Read the current value of the IOC doorbell register. */
static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{

        return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}
  897 
/* Read the current value of the IOC interrupt status register. */
static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{

        return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}
  904 
  905 /* Busy wait for a door bell to be read by IOC */
  906 static int
  907 mpt_wait_db_ack(struct mpt_softc *mpt)
  908 {
  909         int i;
  910 
  911         for (i=0; i < MPT_MAX_WAIT; i++) {
  912                 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
  913                         maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
  914                         return (MPT_OK);
  915                 }
  916                 DELAY(200);
  917         }
  918         return (MPT_FAIL);
  919 }
  920 
  921 /* Busy wait for a door bell interrupt */
  922 static int
  923 mpt_wait_db_int(struct mpt_softc *mpt)
  924 {
  925         int i;
  926 
  927         for (i = 0; i < MPT_MAX_WAIT; i++) {
  928                 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
  929                         maxwait_int = i > maxwait_int ? i : maxwait_int;
  930                         return MPT_OK;
  931                 }
  932                 DELAY(100);
  933         }
  934         return (MPT_FAIL);
  935 }
  936 
/*
 * Sanity-check the doorbell: warn and dump the doorbell contents if
 * the IOC is not in the RUNNING state.  Purely diagnostic -- callers
 * proceed regardless of the outcome.
 */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
        uint32_t db = mpt_rd_db(mpt);

        if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
                mpt_prt(mpt, "Device not running\n");
                mpt_print_db(db);
        }
}
  948 
  949 /* Wait for IOC to transition to a give state */
  950 static int
  951 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
  952 {
  953         int i;
  954 
  955         for (i = 0; i < MPT_MAX_WAIT; i++) {
  956                 uint32_t db = mpt_rd_db(mpt);
  957                 if (MPT_STATE(db) == state) {
  958                         maxwait_state = i > maxwait_state ? i : maxwait_state;
  959                         return (MPT_OK);
  960                 }
  961                 DELAY(100);
  962         }
  963         return (MPT_FAIL);
  964 }
  965 
  966 
  967 /************************* Intialization/Configuration ************************/
  968 static int mpt_download_fw(struct mpt_softc *mpt);
  969 
/*
 * Issue the MESSAGE_UNIT_RESET command to the IOC via the doorbell.
 * Returns MPT_OK on success, MPT_FAIL if the device is in the wrong
 * state, the doorbell is wedged, or the IOC fails to come back READY.
 */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{

        mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

        /* Have to use hard reset if we are not in Running state */
        if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
                mpt_prt(mpt, "soft reset failed: device not running\n");
                return (MPT_FAIL);
        }

        /* If door bell is in use we don't have a chance of getting
         * a word in since the IOC probably crashed in message
         * processing. So don't waste our time.
         */
        if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
                mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
                return (MPT_FAIL);
        }

        /* Send the reset request to the IOC */
        mpt_write(mpt, MPT_OFFSET_DOORBELL,
            MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
        if (mpt_wait_db_ack(mpt) != MPT_OK) {
                mpt_prt(mpt, "soft reset failed: ack timeout\n");
                return (MPT_FAIL);
        }

        /* Wait for the IOC to reload and come out of reset state */
        if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
                mpt_prt(mpt, "soft reset failed: device did not restart\n");
                return (MPT_FAIL);
        }

        return MPT_OK;
}
 1008 
/*
 * Unlock the diagnostic register block by writing the magic five-key
 * sequence to the write-sequence register, retrying for up to ~2
 * seconds (20 x 100ms).  Returns 0 once the DRWE (diagnostic register
 * write enable) bit asserts, or EIO on timeout.
 */
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
        int try;

        try = 20;
        while (--try) {

                /* Already unlocked?  Nothing more to do. */
                if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
                        break;

                /* Enable diagnostic registers */
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

                DELAY(100000);
        }
        if (try == 0)
                return (EIO);
        return (0);
}
 1034 
/*
 * Re-lock the diagnostic register block by writing an invalid value to
 * the write-sequence register, undoing mpt_enable_diag_mode().
 */
static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{

        mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}
 1041 
/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.  On the 1078 a dedicated reset register is
 * used instead of the diagnostic-register sequence.  If firmware was
 * saved at attach time (mpt->fw_image), it is re-downloaded here since
 * a hard reset wipes it.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
        int error;
        int wait;
        uint32_t diagreg;

        mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

        /* The 1078 has its own reset register; no diag sequence needed. */
        if (mpt->is_1078) {
                mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
                DELAY(1000);
                return;
        }

        error = mpt_enable_diag_mode(mpt);
        if (error) {
                mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
                mpt_prt(mpt, "Trying to reset anyway.\n");
        }

        diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

        /*
         * This appears to be a workaround required for some
         * firmware or hardware revs.
         */
        mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
        DELAY(1000);

        /* Diag. port is now active so we can now hit the reset bit */
        mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

        /*
         * Ensure that the reset has finished.  We delay 1ms
         * prior to reading the register to make sure the chip
         * has sufficiently completed its reset to handle register
         * accesses.
         *
         * NOTE(review): the loop continues while RESET_ADAPTER reads
         * as clear (== 0) -- i.e. it exits once the bit is observed
         * set or the 5s budget expires.  Confirm this polarity against
         * the chip documentation before changing.
         */
        wait = 5000;
        do {
                DELAY(1000);
                diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
        } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

        if (wait == 0) {
                mpt_prt(mpt, "WARNING - Failed hard reset! "
                        "Trying to initialize anyway.\n");
        }

        /*
         * If we have firmware to download, it must be loaded before
         * the controller will become operational.  Do so now.
         */
        if (mpt->fw_image != NULL) {

                error = mpt_download_fw(mpt);

                if (error) {
                        mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
                        mpt_prt(mpt, "Trying to initialize anyway.\n");
                }
        }

        /*
         * Resetting the controller should have disabled write
         * access to the diagnostic registers, but disable
         * manually to be sure.
         */
        mpt_disable_diag_mode(mpt);
}
 1116 
/*
 * Core personality's IOC-reset handler: fail every request still on
 * the pending list with MPI_IOCSTATUS_INVALID_STATE, since an IOC
 * reset means they will never complete normally.  The reset 'type'
 * argument is unused here.
 */
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{

        /*
         * Complete all pending requests with a status
         * appropriate for an IOC reset.
         */
        mpt_complete_request_chain(mpt, &mpt->request_pending_list,
                                   MPI_IOCSTATUS_INVALID_STATE);
}
 1128 
 1129 /*
 1130  * Reset the IOC when needed. Try software command first then if needed
 1131  * poke at the magic diagnostic reset. Note that a hard reset resets
 1132  * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 1133  * fouls up the PCI configuration registers.
 1134  */
 1135 int
 1136 mpt_reset(struct mpt_softc *mpt, int reinit)
 1137 {
 1138         struct  mpt_personality *pers;
 1139         int     ret;
 1140         int     retry_cnt = 0;
 1141 
 1142         /*
 1143          * Try a soft reset. If that fails, get out the big hammer.
 1144          */
 1145  again:
 1146         if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
 1147                 int     cnt;
 1148                 for (cnt = 0; cnt < 5; cnt++) {
 1149                         /* Failed; do a hard reset */
 1150                         mpt_hard_reset(mpt);
 1151 
 1152                         /*
 1153                          * Wait for the IOC to reload
 1154                          * and come out of reset state
 1155                          */
 1156                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1157                         if (ret == MPT_OK) {
 1158                                 break;
 1159                         }
 1160                         /*
 1161                          * Okay- try to check again...
 1162                          */
 1163                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1164                         if (ret == MPT_OK) {
 1165                                 break;
 1166                         }
 1167                         mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
 1168                             retry_cnt, cnt);
 1169                 }
 1170         }
 1171 
 1172         if (retry_cnt == 0) {
 1173                 /*
 1174                  * Invoke reset handlers.  We bump the reset count so
 1175                  * that mpt_wait_req() understands that regardless of
 1176                  * the specified wait condition, it should stop its wait.
 1177                  */
 1178                 mpt->reset_cnt++;
 1179                 MPT_PERS_FOREACH(mpt, pers)
 1180                         pers->reset(mpt, ret);
 1181         }
 1182 
 1183         if (reinit) {
 1184                 ret = mpt_enable_ioc(mpt, 1);
 1185                 if (ret == MPT_OK) {
 1186                         mpt_enable_ints(mpt);
 1187                 }
 1188         }
 1189         if (ret != MPT_OK && retry_cnt++ < 2) {
 1190                 goto again;
 1191         }
 1192         return ret;
 1193 }
 1194 
/*
 * Return a command buffer to the free queue.
 *
 * Any chained request is freed first (recursively).  If an event ack
 * was previously deferred for lack of a free request (mpt->ack_frames
 * non-empty), the request is immediately reused to send that ack
 * instead of going back on the free list.
 */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
        request_t *nxt;
        struct mpt_evtf_record *record;
        uint32_t offset, reply_baddr;
        
        if (req == NULL || req != &mpt->request_pool[req->index]) {
                panic("mpt_free_request: bad req ptr");
        }
        if ((nxt = req->chain) != NULL) {
                req->chain = NULL;
                mpt_free_request(mpt, nxt);     /* NB: recursion */
        }
        KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
        KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
        MPT_LOCK_ASSERT(mpt);
        KASSERT(mpt_req_on_free_list(mpt, req) == 0,
            ("mpt_free_request: req %p:%u func %x already on freelist",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
            ("mpt_free_request: req %p:%u func %x on pending list",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef  INVARIANTS
        mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

        req->ccb = NULL;
        if (LIST_EMPTY(&mpt->ack_frames)) {
                /*
                 * Insert free ones at the tail
                 */
                req->serno = 0;
                req->state = REQ_STATE_FREE;
#ifdef  INVARIANTS
                /* Poison the header so stale use is easy to spot. */
                memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
                TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
                /* Wake anyone blocked in mpt_get_request(sleep_ok). */
                if (mpt->getreqwaiter != 0) {
                        mpt->getreqwaiter = 0;
                        wakeup(&mpt->request_free_list);
                }
                return;
        }

        /*
         * Process an ack frame deferred due to resource shortage.
         */
        record = LIST_FIRST(&mpt->ack_frames);
        LIST_REMOVE(record, links);
        req->state = REQ_STATE_ALLOCATED;
        mpt_assign_serno(mpt, req);
        mpt_send_event_ack(mpt, req, &record->reply, record->context);
        /* The deferred record lives in a reply frame; give it back. */
        offset = (uint32_t)((uint8_t *)record - mpt->reply);
        reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
        bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
            MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
        mpt_free_reply(mpt, reply_baddr);
}
 1255 
/*
 * Get a command buffer from the free queue.
 *
 * Returns an allocated request with a fresh serial number, or NULL if
 * the free list is empty and sleep_ok is zero.  When sleep_ok is
 * non-zero, blocks (dropping into mpt_sleep) until mpt_free_request()
 * wakes us, then retries.
 */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
        request_t *req;

retry:
        MPT_LOCK_ASSERT(mpt);
        req = TAILQ_FIRST(&mpt->request_free_list);
        if (req != NULL) {
                KASSERT(req == &mpt->request_pool[req->index],
                    ("mpt_get_request: corrupted request free list"));
                KASSERT(req->state == REQ_STATE_FREE,
                    ("req %p:%u not free on free list %x index %d function %x",
                    req, req->serno, req->state, req->index,
                    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
                TAILQ_REMOVE(&mpt->request_free_list, req, links);
                req->state = REQ_STATE_ALLOCATED;
                req->chain = NULL;
                mpt_assign_serno(mpt, req);
        } else if (sleep_ok != 0) {
                /* Flag a waiter so mpt_free_request() knows to wake us. */
                mpt->getreqwaiter = 1;
                mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
                goto retry;
        }
        return (req);
}
 1283 
/*
 * Pass the command to the IOC: sync the request buffer for device
 * access, move the request onto the pending list, and post its bus
 * address to the request queue register.
 */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{

        if (mpt->verbose > MPT_PRT_DEBUG2) {
                mpt_dump_request(mpt, req);
        }
        /* Flush CPU writes so the IOC sees the complete request. */
        bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        req->state |= REQ_STATE_QUEUED;
        KASSERT(mpt_req_on_free_list(mpt, req) == 0,
            ("req %p:%u func %x on freelist list in mpt_send_cmd",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
            ("req %p:%u func %x already on pending list in mpt_send_cmd",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
        mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}
 1304 
 1305 /*
 1306  * Wait for a request to complete.
 1307  *
 1308  * Inputs:
 1309  *      mpt             softc of controller executing request
 1310  *      req             request to wait for
 1311  *      sleep_ok        nonzero implies may sleep in this context
 1312  *      time_ms         timeout in ms.  0 implies no timeout.
 1313  *
 1314  * Return Values:
 1315  *      0               Request completed
 1316  *      non-0           Timeout fired before request completion.
 1317  */
 1318 int
 1319 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
 1320              mpt_req_state_t state, mpt_req_state_t mask,
 1321              int sleep_ok, int time_ms)
 1322 {
 1323         int   error;
 1324         int   timeout;
 1325         u_int saved_cnt;
 1326 
 1327         /*
 1328          * timeout is in ms.  0 indicates infinite wait.
 1329          * Convert to ticks or 500us units depending on
 1330          * our sleep mode.
 1331          */
 1332         if (sleep_ok != 0) {
 1333                 timeout = (time_ms * hz) / 1000;
 1334         } else {
 1335                 timeout = time_ms * 2;
 1336         }
 1337         req->state |= REQ_STATE_NEED_WAKEUP;
 1338         mask &= ~REQ_STATE_NEED_WAKEUP;
 1339         saved_cnt = mpt->reset_cnt;
 1340         while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
 1341                 if (sleep_ok != 0) {
 1342                         error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
 1343                         if (error == EWOULDBLOCK) {
 1344                                 timeout = 0;
 1345                                 break;
 1346                         }
 1347                 } else {
 1348                         if (time_ms != 0 && --timeout == 0) {
 1349                                 break;
 1350                         }
 1351                         DELAY(500);
 1352                         mpt_intr(mpt);
 1353                 }
 1354         }
 1355         req->state &= ~REQ_STATE_NEED_WAKEUP;
 1356         if (mpt->reset_cnt != saved_cnt) {
 1357                 return (EIO);
 1358         }
 1359         if (time_ms && timeout <= 0) {
 1360                 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
 1361                 req->state |= REQ_STATE_TIMEDOUT;
 1362                 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
 1363                 return (ETIMEDOUT);
 1364         }
 1365         return (0);
 1366 }
 1367 
 1368 /*
 1369  * Send a command to the IOC via the handshake register.
 1370  *
 1371  * Only done at initialization time and for certain unusual
 1372  * commands such as device/bus reset as specified by LSI.
 1373  */
 1374 int
 1375 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
 1376 {
 1377         int i;
 1378         uint32_t data, *data32;
 1379 
 1380         /* Check condition of the IOC */
 1381         data = mpt_rd_db(mpt);
 1382         if ((MPT_STATE(data) != MPT_DB_STATE_READY
 1383           && MPT_STATE(data) != MPT_DB_STATE_RUNNING
 1384           && MPT_STATE(data) != MPT_DB_STATE_FAULT)
 1385          || MPT_DB_IS_IN_USE(data)) {
 1386                 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
 1387                 mpt_print_db(data);
 1388                 return (EBUSY);
 1389         }
 1390 
 1391         /* We move things in 32 bit chunks */
 1392         len = (len + 3) >> 2;
 1393         data32 = cmd;
 1394 
 1395         /* Clear any left over pending doorbell interrupts */
 1396         if (MPT_DB_INTR(mpt_rd_intr(mpt)))
 1397                 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1398 
 1399         /*
 1400          * Tell the handshake reg. we are going to send a command
 1401          * and how long it is going to be.
 1402          */
 1403         data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
 1404             (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
 1405         mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
 1406 
 1407         /* Wait for the chip to notice */
 1408         if (mpt_wait_db_int(mpt) != MPT_OK) {
 1409                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
 1410                 return (ETIMEDOUT);
 1411         }
 1412 
 1413         /* Clear the interrupt */
 1414         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1415 
 1416         if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1417                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
 1418                 return (ETIMEDOUT);
 1419         }
 1420 
 1421         /* Send the command */
 1422         for (i = 0; i < len; i++) {
 1423                 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
 1424                 if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1425                         mpt_prt(mpt,
 1426                             "mpt_send_handshake_cmd: timeout @ index %d\n", i);
 1427                         return (ETIMEDOUT);
 1428                 }
 1429         }
 1430         return MPT_OK;
 1431 }
 1432 
/*
 * Get the response from the handshake register.
 *
 * The IOC delivers the reply 16 bits at a time through the doorbell;
 * each word is preceded by a doorbell interrupt that must be cleared.
 * Up to 'reply_len' bytes are stored in 'reply'; any excess words the
 * IOC sends are drained but discarded.  Returns 0 on success,
 * ETIMEDOUT on a handshake timeout, or (MPT_FAIL | IOCStatus) if the
 * reply carries a non-SUCCESS IOC status.
 */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
        int left, reply_left;
        u_int16_t *data16;
        uint32_t data;
        MSG_DEFAULT_REPLY *hdr;

        /* We move things out in 16 bit chunks */
        reply_len >>= 1;
        data16 = (u_int16_t *)reply;

        hdr = (MSG_DEFAULT_REPLY *)reply;

        /* Get first word */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
                return ETIMEDOUT;
        }
        data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
        *data16++ = le16toh(data & MPT_DB_DATA_MASK);
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        /* Get Second Word (completes hdr->MsgLength and hdr->Function) */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
                return ETIMEDOUT;
        }
        data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
        *data16++ = le16toh(data & MPT_DB_DATA_MASK);
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        /*
         * With the second word, we can now look at the length.
         * Warn about a reply that's too short (except for IOC FACTS REPLY)
         */
        if ((reply_len >> 1) != hdr->MsgLength &&
            (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
                mpt_prt(mpt, "reply length does not match message length: "
                        "got %x; expected %zx for function %x\n",
                        hdr->MsgLength << 2, reply_len << 1, hdr->Function);
        }

        /* Get rest of the reply; but don't overflow the provided buffer */
        left = (hdr->MsgLength << 1) - 2;
        reply_left =  reply_len - 2;
        while (left--) {
                u_int16_t datum;

                if (mpt_wait_db_int(mpt) != MPT_OK) {
                        mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
                        return ETIMEDOUT;
                }
                data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
                datum = le16toh(data & MPT_DB_DATA_MASK);

                /* Store only while there is room in the caller's buffer. */
                if (reply_left-- > 0)
                        *data16++ = datum;

                mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
        }

        /* One more wait & clear at the end */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
                return ETIMEDOUT;
        }
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                if (mpt->verbose >= MPT_PRT_TRACE)
                        mpt_print_reply(hdr);
                return (MPT_FAIL | hdr->IOCStatus);
        }

        return (0);
}
 1511 
 1512 static int
 1513 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
 1514 {
 1515         MSG_IOC_FACTS f_req;
 1516         int error;
 1517         
 1518         memset(&f_req, 0, sizeof f_req);
 1519         f_req.Function = MPI_FUNCTION_IOC_FACTS;
 1520         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1521         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1522         if (error) {
 1523                 return(error);
 1524         }
 1525         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1526         return (error);
 1527 }
 1528 
 1529 static int
 1530 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
 1531 {
 1532         MSG_PORT_FACTS f_req;
 1533         int error;
 1534         
 1535         memset(&f_req, 0, sizeof f_req);
 1536         f_req.Function = MPI_FUNCTION_PORT_FACTS;
 1537         f_req.PortNumber = port;
 1538         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1539         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1540         if (error) {
 1541                 return(error);
 1542         }
 1543         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1544         return (error);
 1545 }
 1546 
 1547 /*
 1548  * Send the initialization request. This is where we specify how many
 1549  * SCSI busses and how many devices per bus we wish to emulate.
 1550  * This is also the command that specifies the max size of the reply
 1551  * frames from the IOC that we will be allocating.
 1552  */
 1553 static int
 1554 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
 1555 {
 1556         int error = 0;
 1557         MSG_IOC_INIT init;
 1558         MSG_IOC_INIT_REPLY reply;
 1559 
 1560         memset(&init, 0, sizeof init);
 1561         init.WhoInit = who;
 1562         init.Function = MPI_FUNCTION_IOC_INIT;
 1563         init.MaxDevices = 0;    /* at least 256 devices per bus */
 1564         init.MaxBuses = 16;     /* at least 16 busses */
 1565 
 1566         init.MsgVersion = htole16(MPI_VERSION);
 1567         init.HeaderVersion = htole16(MPI_HEADER_VERSION);
 1568         init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
 1569         init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1570 
 1571         if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
 1572                 return(error);
 1573         }
 1574 
 1575         error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
 1576         return (error);
 1577 }
 1578 
 1579 
/*
 * Utility routine to issue a configuration request (used for reading
 * and writing configuration headers and pages).  Builds a MSG_CONFIG
 * request in the supplied request buffer, points its single SGE at
 * [addr, addr+len), sends it, and waits for completion.
 *
 * Returns the result of mpt_wait_req() (0 on success).
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
                  bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
{
        MSG_CONFIG *cfgp;
        SGE_SIMPLE32 *se;

        cfgp = req->req_vbuf;
        memset(cfgp, 0, sizeof *cfgp);
        cfgp->Action = params->Action;
        cfgp->Function = MPI_FUNCTION_CONFIG;
        cfgp->Header.PageVersion = params->PageVersion;
        cfgp->Header.PageNumber = params->PageNumber;
        cfgp->PageAddress = htole32(params->PageAddress);
        /*
         * Extended pages carry their length/type in dedicated fields;
         * the in-header PageLength must then be zero.
         */
        if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
            MPI_CONFIG_PAGETYPE_EXTENDED) {
                cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
                cfgp->Header.PageLength = 0;
                cfgp->ExtPageLength = htole16(params->ExtPageLength);
                cfgp->ExtPageType = params->ExtPageType;
        } else {
                cfgp->Header.PageType = params->PageType;
                cfgp->Header.PageLength = params->PageLength;
        }
        /*
         * Single simple SGE describing the page buffer.  Direction is
         * host->IOC for page writes, IOC->host for everything else.
         */
        se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
        se->Address = htole32(addr);
        MPI_pSGE_SET_LENGTH(se, len);
        MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_END_OF_LIST |
            ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
          || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
           ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
        /* FlagsLength was built in host order above; swap it for the IOC. */
        se->FlagsLength = htole32(se->FlagsLength);
        cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);
        return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
                             sleep_ok, timeout_ms));
}
 1624 
/*
 * Read an extended configuration page header from the IOC and copy the
 * result into *rslt.  Returns 0 on success, ENOMEM if no request buffer
 * is available, ETIMEDOUT if the IOC did not respond, EINVAL if the
 * page does not exist, or EIO on any other IOC status.
 */
int
mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
                       uint32_t PageAddress, int ExtPageType,
                       CONFIG_EXTENDED_PAGE_HEADER *rslt,
                       int sleep_ok, int timeout_ms)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG_REPLY *cfgp;
        int         error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        /* Header-only query: zero lengths, extended page type. */
        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = PageVersion;
        params.PageLength = 0;
        params.PageNumber = PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = PageAddress;
        params.ExtPageType = ExtPageType;
        params.ExtPageLength = 0;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  sleep_ok, timeout_ms);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_extcfg_header timed out\n");
                return (ETIMEDOUT);
        }

        switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                /* Copy the header fields (ExtPageLength needs byte swap). */
                cfgp = req->req_vbuf;
                rslt->PageVersion = cfgp->Header.PageVersion;
                rslt->PageNumber = cfgp->Header.PageNumber;
                rslt->PageType = cfgp->Header.PageType;
                rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
                rslt->ExtPageType = cfgp->ExtPageType;
                error = 0;
                break;
        case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
                mpt_lprt(mpt, MPT_PRT_DEBUG,
                    "Invalid Page Type %d Number %d Addr 0x%0x\n",
                    MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
                error = EINVAL;
                break;
        default:
                mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
                        req->IOCStatus);
                error = EIO;
                break;
        }
        mpt_free_request(mpt, req);
        return (error);
}
 1688 
/*
 * Read an extended configuration page (whose header was obtained via
 * mpt_read_extcfg_header) into buf.  The IOC DMAs the page into the
 * scratch area of the request buffer (at offset MPT_RQSL(mpt)), from
 * which it is copied out.  Returns 0 on success, -1 on any failure.
 */
int
mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
                     CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
                     int sleep_ok, int timeout_ms)
{
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
                return (-1);
        }

        params.Action = Action;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = 0;
        params.PageNumber = hdr->PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = PageAddress;
        params.ExtPageType = hdr->ExtPageType;
        params.ExtPageLength = hdr->ExtPageLength;
        error = mpt_issue_cfg_req(mpt, req, &params,
                                  req->req_pbuf + MPT_RQSL(mpt),
                                  len, sleep_ok, timeout_ms);
        if (error != 0) {
                /*
                 * NOTE(review): the request is deliberately not freed here,
                 * presumably following the same chip-still-owns-it
                 * abandonment convention as read_extcfg_header — confirm.
                 */
                mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
                return (-1);
        }

        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
                        req->IOCStatus);
                mpt_free_request(mpt, req);
                return (-1);
        }
        /* Copy the DMA'd page out of the request's scratch area. */
        memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
        mpt_free_request(mpt, req);
        return (0);
}
 1730 
/*
 * Read a (non-extended) configuration page header into *rslt.
 * Returns 0 on success, ENOMEM if no request buffer is available,
 * ETIMEDOUT on no response, EINVAL for a nonexistent page, EIO for
 * any other IOC status.
 */
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
                    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
                    int sleep_ok, int timeout_ms)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG *cfgp;
        int         error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        /*
         * NOTE(review): params.ExtPageType/ExtPageLength are left
         * uninitialized; mpt_issue_cfg_req only reads them for
         * MPI_CONFIG_PAGETYPE_EXTENDED, which callers presumably never
         * pass here — confirm.
         */
        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = 0;
        params.PageLength = 0;
        params.PageNumber = PageNumber;
        params.PageType = PageType;
        params.PageAddress = PageAddress;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  sleep_ok, timeout_ms);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_cfg_header timed out\n");
                return (ETIMEDOUT);
        }

        switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                cfgp = req->req_vbuf;
                bcopy(&cfgp->Header, rslt, sizeof(*rslt));
                error = 0;
                break;
        case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
                mpt_lprt(mpt, MPT_PRT_DEBUG,
                    "Invalid Page Type %d Number %d Addr 0x%0x\n",
                    PageType, PageNumber, PageAddress);
                error = EINVAL;
                break;
        default:
                mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
                        req->IOCStatus);
                error = EIO;
                break;
        }
        mpt_free_request(mpt, req);
        return (error);
}
 1787 
/*
 * Read a configuration page into *hdr (header plus page body, len bytes
 * total).  The IOC DMAs the page into the request's scratch area at
 * offset MPT_RQSL(mpt), from which it is copied back over *hdr.
 * Returns 0 on success, -1 on any failure.
 */
int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
                  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
                  int timeout_ms)
{
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
                return (-1);
        }

        /* Strip attribute bits; the IOC wants the bare page type here. */
        params.Action = Action;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
        params.PageAddress = PageAddress;
        error = mpt_issue_cfg_req(mpt, req, &params,
                                  req->req_pbuf + MPT_RQSL(mpt),
                                  len, sleep_ok, timeout_ms);
        if (error != 0) {
                /* NOTE(review): request abandoned on timeout — see
                 * read_cfg_header's rationale; confirm intentional. */
                mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
                return (-1);
        }

        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
                        req->IOCStatus);
                mpt_free_request(mpt, req);
                return (-1);
        }
        memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
        mpt_free_request(mpt, req);
        return (0);
}
 1827 
/*
 * Write a configuration page (header plus body, len bytes) to the IOC.
 * Refuses pages whose attribute bits mark them read-only.  The page is
 * staged in the request's scratch area for the IOC to DMA from.
 * Returns 0 on success, -1 on any failure.
 */
int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
                   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
                   int timeout_ms)
{
        request_t    *req;
        cfgparms_t    params;
        u_int         hdr_attr;
        int           error;

        /* Only changeable or persistent pages may be written. */
        hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
        if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
            hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
                mpt_prt(mpt, "page type 0x%x not changeable\n",
                        hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
                return (-1);
        }

#if     0
        /*
         * We shouldn't mask off other bits here.
         */
        hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL)
                return (-1);

        /* Stage the caller's page image for the IOC to pull. */
        memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

        /*
         * There isn't any point in restoring stripped out attributes
         * if you then mask them going down to issue the request.
         */

        params.Action = Action;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageAddress = PageAddress;
#if     0
        /* Restore stripped out attributes */
        hdr->PageType |= hdr_attr;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
        params.PageType = hdr->PageType;
#endif
        error = mpt_issue_cfg_req(mpt, req, &params,
                                  req->req_pbuf + MPT_RQSL(mpt),
                                  len, sleep_ok, timeout_ms);
        if (error != 0) {
                /* NOTE(review): request abandoned on timeout — see
                 * read_cfg_header's rationale; confirm intentional. */
                mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
                return (-1);
        }

        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
                        req->IOCStatus);
                mpt_free_request(mpt, req);
                return (-1);
        }
        mpt_free_request(mpt, req);
        return (0);
}
 1893 
/*
 * Read IOC configuration information (IOC pages 2 and 3) and allocate
 * the RAID volume/disk bookkeeping the RAID support code needs.
 * Returns 0 on success (including the "IOC page 2 unsupported" case),
 * ENOMEM/EIO/other errno on failure; all partially-allocated RAID
 * memory is released via mpt_raid_free_mem() on error paths.
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
        CONFIG_PAGE_HEADER hdr;
        struct mpt_raid_volume *mpt_raid;
        int rv;
        int i;
        size_t len;

        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
                2, 0, &hdr, FALSE, 5000);
        /*
         * If it's an invalid page, so what? Not a supported function....
         */
        if (rv == EINVAL) {
                return (0);
        }
        if (rv) {
                return (rv);
        }

        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
            hdr.PageVersion, hdr.PageLength << 2,
            hdr.PageNumber, hdr.PageType);

        /* PageLength is in 32-bit words. */
        len = hdr.PageLength * sizeof(uint32_t);
        mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (mpt->ioc_page2 == NULL) {
                mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
                mpt_raid_free_mem(mpt);
                return (ENOMEM);
        }
        /* Seed the header so mpt_read_cur_cfg_page knows what to fetch. */
        memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
        rv = mpt_read_cur_cfg_page(mpt, 0,
            &mpt->ioc_page2->Header, len, FALSE, 5000);
        if (rv) {
                mpt_prt(mpt, "failed to read IOC Page 2\n");
                mpt_raid_free_mem(mpt);
                return (EIO);
        }
        /* Byte-swap the page into host order before using its fields. */
        mpt2host_config_page_ioc2(mpt->ioc_page2);

        if (mpt->ioc_page2->CapabilitiesFlags != 0) {
                uint32_t mask;

                /* Announce each supported RAID/enclosure capability bit. */
                mpt_prt(mpt, "Capabilities: (");
                for (mask = 1; mask != 0; mask <<= 1) {
                        if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
                                continue;
                        }
                        switch (mask) {
                        case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
                                mpt_prtc(mpt, " RAID-0");
                                break;
                        case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
                                mpt_prtc(mpt, " RAID-1E");
                                break;
                        case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
                                mpt_prtc(mpt, " RAID-1");
                                break;
                        case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
                                mpt_prtc(mpt, " SES");
                                break;
                        case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
                                mpt_prtc(mpt, " SAFTE");
                                break;
                        case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
                                mpt_prtc(mpt, " Multi-Channel-Arrays");
                                /* FALLTHROUGH (default merely breaks) */
                        default:
                                break;
                        }
                }
                mpt_prtc(mpt, " )\n");
                if ((mpt->ioc_page2->CapabilitiesFlags
                   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
                    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
                    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
                        mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
                                mpt->ioc_page2->NumActiveVolumes,
                                mpt->ioc_page2->NumActiveVolumes != 1
                              ? "s " : " ",
                                mpt->ioc_page2->MaxVolumes);
                        mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
                                mpt->ioc_page2->NumActivePhysDisks,
                                mpt->ioc_page2->NumActivePhysDisks != 1
                              ? "s " : " ",
                                mpt->ioc_page2->MaxPhysDisks);
                }
        }

        /* Per-volume driver state, one slot per possible volume. */
        len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
        mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (mpt->raid_volumes == NULL) {
                mpt_prt(mpt, "Could not allocate RAID volume data\n");
                mpt_raid_free_mem(mpt);
                return (ENOMEM);
        }

        /*
         * Copy critical data out of ioc_page2 so that we can
         * safely refresh the page without windows of unreliable
         * data.
         */
        mpt->raid_max_volumes =  mpt->ioc_page2->MaxVolumes;

        /*
         * RAID volume page 0 ends in a variable-length disk array;
         * the struct already contains one element, hence the "- 1".
         */
        len = sizeof(*mpt->raid_volumes->config_page) +
            (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
        for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
                mpt_raid = &mpt->raid_volumes[i];
                mpt_raid->config_page =
                    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
                if (mpt_raid->config_page == NULL) {
                        mpt_prt(mpt, "Could not allocate RAID page data\n");
                        mpt_raid_free_mem(mpt);
                        return (ENOMEM);
                }
        }
        mpt->raid_page0_len = len;

        /* Per-physical-disk driver state. */
        len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
        mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (mpt->raid_disks == NULL) {
                mpt_prt(mpt, "Could not allocate RAID disk data\n");
                mpt_raid_free_mem(mpt);
                return (ENOMEM);
        }
        mpt->raid_max_disks =  mpt->ioc_page2->MaxPhysDisks;

        /*
         * Load page 3.
         */
        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
            3, 0, &hdr, FALSE, 5000);
        if (rv) {
                mpt_raid_free_mem(mpt);
                return (EIO);
        }

        mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
            hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

        len = hdr.PageLength * sizeof(uint32_t);
        mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (mpt->ioc_page3 == NULL) {
                mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
                mpt_raid_free_mem(mpt);
                return (ENOMEM);
        }
        memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
        rv = mpt_read_cur_cfg_page(mpt, 0,
            &mpt->ioc_page3->Header, len, FALSE, 5000);
        if (rv) {
                mpt_raid_free_mem(mpt);
                return (EIO);
        }
        mpt2host_config_page_ioc3(mpt->ioc_page3);
        /* Kick the RAID support thread now that its data is in place. */
        mpt_raid_wakeup(mpt);
        return (0);
}
 2057 
/*
 * Enable an IOC port.  Sends a PORT_ENABLE request and waits for it to
 * complete — SAS and FC parts are given 300 seconds, parallel SCSI 30.
 * Returns 0 on success, -1 if no request buffer is available or the
 * enable timed out.
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
        request_t       *req;
        MSG_PORT_ENABLE *enable_req;
        int              error;

        req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
        if (req == NULL)
                return (-1);

        enable_req = req->req_vbuf;
        memset(enable_req, 0,  MPT_RQSL(mpt));

        enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
        enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
        enable_req->PortNumber = port;

        mpt_check_doorbell(mpt);
        mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

        mpt_send_cmd(mpt, req);
        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
            FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000);
        if (error != 0) {
                /* NOTE(review): request is not freed on timeout — looks like
                 * the same chip-owns-it abandonment convention used by the
                 * config-page routines; confirm. */
                mpt_prt(mpt, "port %d enable timed out\n", port);
                return (-1);
        }
        mpt_free_request(mpt, req);
        mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
        return (0);
}
 2093 
 2094 /*
 2095  * Enable/Disable asynchronous event reporting.
 2096  */
 2097 static int
 2098 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
 2099 {
 2100         request_t *req;
 2101         MSG_EVENT_NOTIFY *enable_req;
 2102 
 2103         req = mpt_get_request(mpt, FALSE);
 2104         if (req == NULL) {
 2105                 return (ENOMEM);
 2106         }
 2107         enable_req = req->req_vbuf;
 2108         memset(enable_req, 0, sizeof *enable_req);
 2109 
 2110         enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
 2111         enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
 2112         enable_req->Switch     = onoff;
 2113 
 2114         mpt_check_doorbell(mpt);
 2115         mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
 2116             onoff ? "en" : "dis");
 2117         /*
 2118          * Send the command off, but don't wait for it.
 2119          */
 2120         mpt_send_cmd(mpt, req);
 2121         return (0);
 2122 }
 2123 
 2124 /*
 2125  * Un-mask the interrupts on the chip.
 2126  */
 2127 void
 2128 mpt_enable_ints(struct mpt_softc *mpt)
 2129 {
 2130 
 2131         /* Unmask every thing except door bell int */
 2132         mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
 2133 }
 2134 
 2135 /*
 2136  * Mask the interrupts on the chip.
 2137  */
 2138 void
 2139 mpt_disable_ints(struct mpt_softc *mpt)
 2140 {
 2141 
 2142         /* Mask all interrupts */
 2143         mpt_write(mpt, MPT_OFFSET_INTR_MASK,
 2144             MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
 2145 }
 2146 
 2147 static void
 2148 mpt_sysctl_attach(struct mpt_softc *mpt)
 2149 {
 2150         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
 2151         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
 2152 
 2153         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2154                        "debug", CTLFLAG_RW, &mpt->verbose, 0,
 2155                        "Debugging/Verbose level");
 2156         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2157                        "role", CTLFLAG_RD, &mpt->role, 0,
 2158                        "HBA role");
 2159 #ifdef  MPT_TEST_MULTIPATH
 2160         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2161                        "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
 2162                        "Next Target to Fail");
 2163 #endif
 2164 }
 2165 
/*
 * Attach the controller: run core attach/enable, then probe, attach,
 * and finally enable every registered personality that claims this
 * instance.  On any personality failure the whole device is detached
 * and the error returned; 0 on success.
 */
int
mpt_attach(struct mpt_softc *mpt)
{
        struct mpt_personality *pers;
        int i;
        int error;

        mpt_core_attach(mpt);
        mpt_core_enable(mpt);

        TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
        /* First pass: probe and attach each interested personality. */
        for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
                pers = mpt_personalities[i];
                if (pers == NULL) {
                        continue;
                }
                if (pers->probe(mpt) == 0) {
                        error = pers->attach(mpt);
                        if (error != 0) {
                                mpt_detach(mpt);
                                return (error);
                        }
                        /* Record the attachment for MPT_PERS_ATTACHED(). */
                        mpt->mpt_pers_mask |= (0x1 << pers->id);
                        pers->use_count++;
                }
        }

        /*
         * Now that we've attached everything, do the enable function
         * for all of the personalities. This allows the personalities
         * to do setups that are appropriate for them prior to enabling
         * any ports.
         */
        for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
                pers = mpt_personalities[i];
                if (pers != NULL  && MPT_PERS_ATTACHED(pers, mpt) != 0) {
                        error = pers->enable(mpt);
                        if (error != 0) {
                                mpt_prt(mpt, "personality %s attached but would"
                                    " not enable (%d)\n", pers->name, error);
                                mpt_detach(mpt);
                                return (error);
                        }
                }
        }
        return (0);
}
 2213 
/*
 * Shut the controller down by invoking each attached personality's
 * shutdown hook in reverse-attach order.  Always returns 0.
 */
int
mpt_shutdown(struct mpt_softc *mpt)
{
        struct mpt_personality *pers;

        MPT_PERS_FOREACH_REVERSE(mpt, pers) {
                pers->shutdown(mpt);
        }
        return (0);
}
 2224 
/*
 * Detach all attached personalities (reverse-attach order), clearing
 * their bit in mpt_pers_mask and dropping their use count, then remove
 * this instance from the global list.  Always returns 0.
 */
int
mpt_detach(struct mpt_softc *mpt)
{
        struct mpt_personality *pers;

        MPT_PERS_FOREACH_REVERSE(mpt, pers) {
                pers->detach(mpt);
                mpt->mpt_pers_mask &= ~(0x1 << pers->id);
                pers->use_count--;
        }
        TAILQ_REMOVE(&mpt_tailq, mpt, links);
        return (0);
}
 2238 
 2239 static int
 2240 mpt_core_load(struct mpt_personality *pers)
 2241 {
 2242         int i;
 2243 
 2244         /*
 2245          * Setup core handlers and insert the default handler
 2246          * into all "empty slots".
 2247          */
 2248         for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
 2249                 mpt_reply_handlers[i] = mpt_default_reply_handler;
 2250         }
 2251 
 2252         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
 2253             mpt_event_reply_handler;
 2254         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
 2255             mpt_config_reply_handler;
 2256         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
 2257             mpt_handshake_reply_handler;
 2258         return (0);
 2259 }
 2260 
/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 *
 * Sets up the ack-frame list, the request queues, and the per-LUN
 * target-mode queues, attaches the sysctl tree, then configures the
 * IOC under the softc lock.  Returns the result of mpt_configure_ioc().
 */
static int
mpt_core_attach(struct mpt_softc *mpt)
{
        int val, error;

        LIST_INIT(&mpt->ack_frames);
        /* Put all request buffers on the free list */
        TAILQ_INIT(&mpt->request_pending_list);
        TAILQ_INIT(&mpt->request_free_list);
        TAILQ_INIT(&mpt->request_timeout_list);
        /* Per-LUN target-mode queues, plus the wildcard LUN. */
        for (val = 0; val < MPT_MAX_LUNS; val++) {
                STAILQ_INIT(&mpt->trt[val].atios);
                STAILQ_INIT(&mpt->trt[val].inots);
        }
        STAILQ_INIT(&mpt->trt_wildcard.atios);
        STAILQ_INIT(&mpt->trt_wildcard.inots);
#ifdef  MPT_TEST_MULTIPATH
        mpt->failure_id = -1;
#endif
        mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
        mpt_sysctl_attach(mpt);
        mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
            mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

        MPT_LOCK(mpt);
        error = mpt_configure_ioc(mpt, 0, 0);
        MPT_UNLOCK(mpt);

        return (error);
}
 2295 
/*
 * Final bring-up step for the core personality: turn on async events,
 * interrupts, and the port.  Returns 0 on success or ENXIO if the
 * port-enable handshake fails.
 */
static int
mpt_core_enable(struct mpt_softc *mpt)
{

	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */
	MPT_LOCK(mpt);

	/*
	 * Enable asynchronous event reporting- all personalities
	 * have attached so that they should be able to now field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 * (Deliberately called both before and after enabling
	 * interrupts; do not "deduplicate" these calls.)
	 */
	mpt_intr(mpt);

	/*
	 * Enable the port.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		MPT_UNLOCK(mpt);
		return (ENXIO);
	}
	MPT_UNLOCK(mpt);
	return (0);
}
 2346 
/* Core personality shutdown hook: just quiesce interrupt delivery. */
static void
mpt_core_shutdown(struct mpt_softc *mpt)
{

	mpt_disable_ints(mpt);
}
 2353 
/*
 * Core personality detach hook: stop interrupts, drain all per-request
 * timeout callouts, and release the DMA resources.
 */
static void
mpt_core_detach(struct mpt_softc *mpt)
{
	int val;

	/*
	 * XXX: FREE MEMORY 
	 */
	mpt_disable_ints(mpt);

	/* Make sure no request has pending timeouts. */
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		mpt_callout_drain(mpt, &req->callout);
	}

	mpt_dma_buf_free(mpt);
}
 2372 
/* Core personality module-unload hook; nothing to undo. */
static int
mpt_core_unload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}
 2380 
/*
 * Size of an FW_UPLOAD request frame: the base message with its SGL
 * union replaced by one transaction-context element and one simple
 * 32-bit SGE.
 */
#define FW_UPLOAD_REQ_SIZE                              \
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)  \
       + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
 2384 
/*
 * Pull a copy of the IOC's firmware image into the host buffer at
 * mpt->fw_phys via the doorbell handshake protocol, so it can be
 * re-downloaded after a hard reset.  Returns 0 on success.
 */
static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;
	
	/* Build the FW_UPLOAD request in a zeroed stack buffer. */
	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	/* Transaction-context element precedes the data SGE in the SGL. */
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;	/* presumably the TCSGE detail byte count per MPI spec — TODO confirm */
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize = htole32(mpt->fw_image_size);
	/* Single simple SGE describing the whole host-side image buffer. */
	sge = (SGE_SIMPLE32 *)(tsge + 1);
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	      | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	      | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	/* Sync before the IOC DMAs into the buffer, and after the reply. */
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
	return (error);
}
 2420 
 2421 static void
 2422 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
 2423                uint32_t *data, bus_size_t len)
 2424 {
 2425         uint32_t *data_end;
 2426 
 2427         data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
 2428         if (mpt->is_sas) {
 2429                 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
 2430         }
 2431         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
 2432         while (data != data_end) {
 2433                 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
 2434                 data++;
 2435         }
 2436         if (mpt->is_sas) {
 2437                 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
 2438         }
 2439 }
 2440 
/*
 * Re-download the previously uploaded firmware image to the IOC via
 * PIO to the diagnostic window, then restart the IOC's processor.
 * The exact register sequence here is order-critical.
 */
static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	if (mpt->pci_pio_reg == NULL) {
		mpt_prt(mpt, "No PIO resource!\n");
		return (ENXIO);
	}

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
		mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	/* Allow diag-window writes and hold the IOC's ARM processor. */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
		  MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	/* Push the main image to its load address. */
	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
		       fw_hdr->ImageSize);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);

	/* Follow the chain of extended images, if any, and push each. */
	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_PREWRITE);
		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
			       ext->ImageSize);
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash.  Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}
 2517 
/*
 * Create the DMA tags and the contiguous request-frame memory, then
 * carve that memory into per-request areas with their own DMA maps.
 * Returns 0 on success, 1 on any failure (cleanup happens later via
 * mpt_dma_buf_free()).
 */
static int
mpt_dma_buf_alloc(struct mpt_softc *mpt)
{
	struct mpt_map_info mi;
	uint8_t *vptr;
	uint32_t pptr, end;
	int i, error;

	/* Create a child tag for data buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
	    mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->buffer_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
		return (1);
	}

	/* Create a child tag for request buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->request_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for requests\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for requests */
	if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
		    MPT_REQ_MEM_SIZE(mpt));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/* Load and lock it into "bus space" */
	bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
		    mi.error);
		return (1);
	}
	mpt->request_phys = mi.phys;

	/*
	 * Now create per-request dma maps
	 */
	i = 0;
	pptr =  mpt->request_phys;
	vptr =  mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);
	while(pptr < end) {
		request_t *req = &mpt->request_pool[i];
		req->index = i++;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		/* Sense data occupies the tail of each request area. */
		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		/* Each request gets its own map on the data-buffer tag. */
		error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
		if (error) {
			mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
			    error);
			return (1);
		}
	}

	return (0);
}
 2598 
/*
 * Release everything mpt_dma_buf_alloc() set up.  Safe to call more
 * than once: request_dmat == 0 marks the resources as already freed.
 */
static void
mpt_dma_buf_free(struct mpt_softc *mpt)
{
	int i;

	if (mpt->request_dmat == 0) {
		mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
		return;
	}
	/* Per-request maps first, then unload/free/destroy in reverse order. */
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
	}
	bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
	bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
	bus_dma_tag_destroy(mpt->request_dmat);
	mpt->request_dmat = 0;
	bus_dma_tag_destroy(mpt->buffer_dmat);
}
 2617 
 2618 /*
 2619  * Allocate/Initialize data structures for the controller.  Called
 2620  * once at instance startup.
 2621  */
 2622 static int
 2623 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
 2624 {
 2625         PTR_MSG_PORT_FACTS_REPLY pfp;
 2626         int error, port, val;
 2627         size_t len;
 2628 
 2629         if (tn == MPT_MAX_TRYS) {
 2630                 return (-1);
 2631         }
 2632 
 2633         /*
 2634          * No need to reset if the IOC is already in the READY state.
 2635          *
 2636          * Force reset if initialization failed previously.
 2637          * Note that a hard_reset of the second channel of a '929
 2638          * will stop operation of the first channel.  Hopefully, if the
 2639          * first channel is ok, the second will not require a hard
 2640          * reset.
 2641          */
 2642         if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
 2643                 if (mpt_reset(mpt, FALSE) != MPT_OK) {
 2644                         return (mpt_configure_ioc(mpt, tn++, 1));
 2645                 }
 2646                 needreset = 0;
 2647         }
 2648 
 2649         if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
 2650                 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
 2651                 return (mpt_configure_ioc(mpt, tn++, 1));
 2652         }
 2653         mpt2host_iocfacts_reply(&mpt->ioc_facts);
 2654 
 2655         mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
 2656             mpt->ioc_facts.MsgVersion >> 8,
 2657             mpt->ioc_facts.MsgVersion & 0xFF,
 2658             mpt->ioc_facts.HeaderVersion >> 8,
 2659             mpt->ioc_facts.HeaderVersion & 0xFF);
 2660 
 2661         /*
 2662          * Now that we know request frame size, we can calculate
 2663          * the actual (reasonable) segment limit for read/write I/O.
 2664          *
 2665          * This limit is constrained by:
 2666          *
 2667          *  + The size of each area we allocate per command (and how
 2668          *    many chain segments we can fit into it).
 2669          *  + The total number of areas we've set up.
 2670          *  + The actual chain depth the card will allow.
 2671          *
 2672          * The first area's segment count is limited by the I/O request
 2673          * at the head of it. We cannot allocate realistically more
 2674          * than MPT_MAX_REQUESTS areas. Therefore, to account for both
 2675          * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
 2676          *
 2677          */
 2678         /* total number of request areas we (can) allocate */
 2679         mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
 2680 
 2681         /* converted to the number of chain areas possible */
 2682         mpt->max_seg_cnt *= MPT_NRFM(mpt);
 2683 
 2684         /* limited by the number of chain areas the card will support */
 2685         if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
 2686                 mpt_lprt(mpt, MPT_PRT_INFO,
 2687                     "chain depth limited to %u (from %u)\n",
 2688                     mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
 2689                 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
 2690         }
 2691 
 2692         /* converted to the number of simple sges in chain segments. */
 2693         mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
 2694 
 2695         /*
 2696          * Use this as the basis for reporting the maximum I/O size to CAM.
 2697          */
 2698         mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
 2699 
 2700         error = mpt_dma_buf_alloc(mpt);
 2701         if (error != 0) {
 2702                 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
 2703                 return (EIO);
 2704         }
 2705 
 2706         for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 2707                 request_t *req = &mpt->request_pool[val];
 2708                 req->state = REQ_STATE_ALLOCATED;
 2709                 mpt_callout_init(mpt, &req->callout);
 2710                 mpt_free_request(mpt, req);
 2711         }
 2712 
 2713         mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
 2714                  "CAM Segment Count: %u\n", mpt->max_seg_cnt,
 2715                  mpt->max_cam_seg_cnt);
 2716 
 2717         mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
 2718             mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
 2719         mpt_lprt(mpt, MPT_PRT_INFO,
 2720             "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
 2721             "Request Frame Size %u bytes Max Chain Depth %u\n",
 2722             mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
 2723             mpt->ioc_facts.RequestFrameSize << 2,
 2724             mpt->ioc_facts.MaxChainDepth);
 2725         mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
 2726             "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
 2727             mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
 2728 
 2729         len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
 2730         mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2731         if (mpt->port_facts == NULL) {
 2732                 mpt_prt(mpt, "unable to allocate memory for port facts\n");
 2733                 return (ENOMEM);
 2734         }
 2735 
 2736 
 2737         if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
 2738             (mpt->fw_uploaded == 0)) {
 2739                 struct mpt_map_info mi;
 2740 
 2741                 /*
 2742                  * In some configurations, the IOC's firmware is
 2743                  * stored in a shared piece of system NVRAM that
 2744                  * is only accessible via the BIOS.  In this
 2745                  * case, the firmware keeps a copy of firmware in
 2746                  * RAM until the OS driver retrieves it.  Once
 2747                  * retrieved, we are responsible for re-downloading
 2748                  * the firmware after any hard-reset.
 2749                  */
 2750                 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
 2751                 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
 2752                     BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2753                     mpt->fw_image_size, 1, mpt->fw_image_size, 0,
 2754                     &mpt->fw_dmat);
 2755                 if (error != 0) {
 2756                         mpt_prt(mpt, "cannot create firmware dma tag\n");
 2757                         return (ENOMEM);
 2758                 }
 2759                 error = bus_dmamem_alloc(mpt->fw_dmat,
 2760                     (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
 2761                     BUS_DMA_COHERENT, &mpt->fw_dmap);
 2762                 if (error != 0) {
 2763                         mpt_prt(mpt, "cannot allocate firmware memory\n");
 2764                         bus_dma_tag_destroy(mpt->fw_dmat);
 2765                         return (ENOMEM);
 2766                 }
 2767                 mi.mpt = mpt;
 2768                 mi.error = 0;
 2769                 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
 2770                     mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
 2771                 mpt->fw_phys = mi.phys;
 2772 
 2773                 error = mpt_upload_fw(mpt);
 2774                 if (error != 0) {
 2775                         mpt_prt(mpt, "firmware upload failed.\n");
 2776                         bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
 2777                         bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
 2778                             mpt->fw_dmap);
 2779                         bus_dma_tag_destroy(mpt->fw_dmat);
 2780                         mpt->fw_image = NULL;
 2781                         return (EIO);
 2782                 }
 2783                 mpt->fw_uploaded = 1;
 2784         }
 2785 
 2786         for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
 2787                 pfp = &mpt->port_facts[port];
 2788                 error = mpt_get_portfacts(mpt, 0, pfp);
 2789                 if (error != MPT_OK) {
 2790                         mpt_prt(mpt,
 2791                             "mpt_get_portfacts on port %d failed\n", port);
 2792                         free(mpt->port_facts, M_DEVBUF);
 2793                         mpt->port_facts = NULL;
 2794                         return (mpt_configure_ioc(mpt, tn++, 1));
 2795                 }
 2796                 mpt2host_portfacts_reply(pfp);
 2797 
 2798                 if (port > 0) {
 2799                         error = MPT_PRT_INFO;
 2800                 } else {
 2801                         error = MPT_PRT_DEBUG;
 2802                 }
 2803                 mpt_lprt(mpt, error,
 2804                     "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
 2805                     port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
 2806                     pfp->MaxDevices);
 2807 
 2808         }
 2809 
 2810         /*
 2811          * XXX: Not yet supporting more than port 0
 2812          */
 2813         pfp = &mpt->port_facts[0];
 2814         if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
 2815                 mpt->is_fc = 1;
 2816                 mpt->is_sas = 0;
 2817                 mpt->is_spi = 0;
 2818         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
 2819                 mpt->is_fc = 0;
 2820                 mpt->is_sas = 1;
 2821                 mpt->is_spi = 0;
 2822         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
 2823                 mpt->is_fc = 0;
 2824                 mpt->is_sas = 0;
 2825                 mpt->is_spi = 1;
 2826                 if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
 2827                         mpt->mpt_ini_id = pfp->PortSCSIID;
 2828         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
 2829                 mpt_prt(mpt, "iSCSI not supported yet\n");
 2830                 return (ENXIO);
 2831         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
 2832                 mpt_prt(mpt, "Inactive Port\n");
 2833                 return (ENXIO);
 2834         } else {
 2835                 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
 2836                 return (ENXIO);
 2837         }
 2838 
 2839         /*
 2840          * Set our role with what this port supports.
 2841          *
 2842          * Note this might be changed later in different modules
 2843          * if this is different from what is wanted.
 2844          */
 2845         mpt->role = MPT_ROLE_NONE;
 2846         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
 2847                 mpt->role |= MPT_ROLE_INITIATOR;
 2848         }
 2849         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
 2850                 mpt->role |= MPT_ROLE_TARGET;
 2851         }
 2852 
 2853         /*
 2854          * Enable the IOC
 2855          */
 2856         if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
 2857                 mpt_prt(mpt, "unable to initialize IOC\n");
 2858                 return (ENXIO);
 2859         }
 2860 
 2861         /*
 2862          * Read IOC configuration information.
 2863          *
 2864          * We need this to determine whether or not we have certain
 2865          * settings for Integrated Mirroring (e.g.).
 2866          */
 2867         mpt_read_config_info_ioc(mpt);
 2868 
 2869         return (0);
 2870 }
 2871 
/*
 * Send IOC_INIT, wait for the IOC to reach the RUNNING state, and hand
 * it reply buffers.  If 'portenable' is set (used when re-initializing
 * after a reset, not at first attach), also re-enable async events and
 * port 0.  Returns MPT_OK or an errno.
 */
static int
mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
{
	uint32_t pptr;
	int val;

	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
		mpt_prt(mpt, "IOC failed to go to run state\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

	/*
	 * Give it reply buffers
	 *
	 * Do *not* exceed global credits.
	 *
	 * Post reply frames carved from the reply page until either the
	 * page is exhausted or we are one short of GlobalCredits.
	 */
	for (val = 0, pptr = mpt->reply_phys;
	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
	     pptr += MPT_REPLY_SIZE) {
		mpt_free_reply(mpt, pptr);
		if (++val == mpt->ioc_facts.GlobalCredits - 1)
			break;
	}


	/*
	 * Enable the port if asked. This is only done if we're resetting
	 * the IOC after initial startup.
	 */
	if (portenable) {
		/*
		 * Enable asynchronous event reporting
		 */
		mpt_send_event_request(mpt, 1);

		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
			mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
			return (ENXIO);
		}
	}
	return (MPT_OK);
}
 2922 
 2923 /*
 2924  * Endian Conversion Functions- only used on Big Endian machines
 2925  */
 2926 #if     _BYTE_ORDER == _BIG_ENDIAN
/* Byte-swap a simple SGE's multi-byte fields to host endianness. */
void
mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
{

	MPT_2_HOST32(sge, FlagsLength);
	MPT_2_HOST32(sge, u.Address64.Low);
	MPT_2_HOST32(sge, u.Address64.High);
}
 2935 
/* Byte-swap an IOC_FACTS reply's multi-byte fields to host endianness. */
void
mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
{

	MPT_2_HOST16(rp, MsgVersion);
	MPT_2_HOST16(rp, HeaderVersion);
	MPT_2_HOST32(rp, MsgContext);
	MPT_2_HOST16(rp, IOCExceptions);
	MPT_2_HOST16(rp, IOCStatus);
	MPT_2_HOST32(rp, IOCLogInfo);
	MPT_2_HOST16(rp, ReplyQueueDepth);
	MPT_2_HOST16(rp, RequestFrameSize);
	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
	MPT_2_HOST16(rp, ProductID);
	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
	MPT_2_HOST16(rp, GlobalCredits);
	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
	MPT_2_HOST16(rp, CurReplyFrameSize);
	MPT_2_HOST32(rp, FWImageSize);
	MPT_2_HOST32(rp, IOCCapabilities);
	MPT_2_HOST32(rp, FWVersion.Word);
	MPT_2_HOST16(rp, HighPriorityQueueDepth);
	MPT_2_HOST16(rp, Reserved2);
	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
}
 2962 
/* Byte-swap a PORT_FACTS reply's multi-byte fields to host endianness. */
void
mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
{

	MPT_2_HOST16(pfp, Reserved);
	MPT_2_HOST16(pfp, Reserved1);
	MPT_2_HOST32(pfp, MsgContext);
	MPT_2_HOST16(pfp, Reserved2);
	MPT_2_HOST16(pfp, IOCStatus);
	MPT_2_HOST32(pfp, IOCLogInfo);
	MPT_2_HOST16(pfp, MaxDevices);
	MPT_2_HOST16(pfp, PortSCSIID);
	MPT_2_HOST16(pfp, ProtocolFlags);
	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
	MPT_2_HOST16(pfp, MaxPersistentIDs);
	MPT_2_HOST16(pfp, MaxLanBuckets);
	MPT_2_HOST16(pfp, Reserved4);
	MPT_2_HOST32(pfp, Reserved5);
}
 2982 
 2983 void
 2984 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
 2985 {
 2986         int i;
 2987 
 2988         MPT_2_HOST32(ioc2, CapabilitiesFlags);
 2989         for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
 2990                 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
 2991         }
 2992 }
 2993 
/* Byte-swap IOC configuration page 3 fields to host endianness. */
void
mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
{

	MPT_2_HOST16(ioc3, Reserved2);
}
 3000 
/* Byte-swap SCSI port page 0 fields to host endianness. */
void
mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
{

	MPT_2_HOST32(sp0, Capabilities);
	MPT_2_HOST32(sp0, PhysicalInterface);
}
 3008 
/* Byte-swap SCSI port page 1 fields to host endianness. */
void
mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	MPT_2_HOST32(sp1, Configuration);
	MPT_2_HOST32(sp1, OnBusTimerValue);
	MPT_2_HOST16(sp1, IDConfig);
}
 3017 
/* Byte-swap SCSI port page 1 fields back to wire order for the IOC. */
void
host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	HOST_2_MPT32(sp1, Configuration);
	HOST_2_MPT32(sp1, OnBusTimerValue);
	HOST_2_MPT16(sp1, IDConfig);
}
 3026 
 3027 void
 3028 mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
 3029 {
 3030         int i;
 3031 
 3032         MPT_2_HOST32(sp2, PortFlags);
 3033         MPT_2_HOST32(sp2, PortSettings);
 3034         for (i = 0; i < sizeof(sp2->DeviceSettings) /
 3035             sizeof(*sp2->DeviceSettings); i++) {
 3036                 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
 3037         }
 3038 }
 3039 
/* Byte-swap SCSI device page 0 fields to host endianness. */
void
mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
{

	MPT_2_HOST32(sd0, NegotiatedParameters);
	MPT_2_HOST32(sd0, Information);
}
 3047 
/* Byte-swap SCSI device page 1 fields to host endianness. */
void
mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	MPT_2_HOST32(sd1, RequestedParameters);
	MPT_2_HOST32(sd1, Reserved);
	MPT_2_HOST32(sd1, Configuration);
}
 3056 
/* Byte-swap SCSI device page 1 fields back to wire order for the IOC. */
void
host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	HOST_2_MPT32(sd1, RequestedParameters);
	HOST_2_MPT32(sd1, Reserved);
	HOST_2_MPT32(sd1, Configuration);
}
 3065 
/* Byte-swap FC port page 0 fields to host endianness. */
void
mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
{

	MPT_2_HOST32(fp0, Flags);
	MPT_2_HOST32(fp0, PortIdentifier);
	MPT_2_HOST32(fp0, WWNN.Low);
	MPT_2_HOST32(fp0, WWNN.High);
	MPT_2_HOST32(fp0, WWPN.Low);
	MPT_2_HOST32(fp0, WWPN.High);
	MPT_2_HOST32(fp0, SupportedServiceClass);
	MPT_2_HOST32(fp0, SupportedSpeeds);
	MPT_2_HOST32(fp0, CurrentSpeed);
	MPT_2_HOST32(fp0, MaxFrameSize);
	MPT_2_HOST32(fp0, FabricWWNN.Low);
	MPT_2_HOST32(fp0, FabricWWNN.High);
	MPT_2_HOST32(fp0, FabricWWPN.Low);
	MPT_2_HOST32(fp0, FabricWWPN.High);
	MPT_2_HOST32(fp0, DiscoveredPortsCount);
	MPT_2_HOST32(fp0, MaxInitiators);
}
 3087 
/* Byte-swap FC port page 1 fields to host endianness. */
void
mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	MPT_2_HOST32(fp1, Flags);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
}
 3098 
 3099 void
 3100 host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
 3101 {
 3102 
 3103         HOST_2_MPT32(fp1, Flags);
 3104         HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
 3105         HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
 3106         HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
 3107         HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
 3108 }
 3109 
 3110 void
 3111 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
 3112 {
 3113         int i;
 3114 
 3115         MPT_2_HOST16(volp, VolumeStatus.Reserved);
 3116         MPT_2_HOST16(volp, VolumeSettings.Settings);
 3117         MPT_2_HOST32(volp, MaxLBA);
 3118         MPT_2_HOST32(volp, MaxLBAHigh);
 3119         MPT_2_HOST32(volp, StripeSize);
 3120         MPT_2_HOST32(volp, Reserved2);
 3121         MPT_2_HOST32(volp, Reserved3);
 3122         for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
 3123                 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
 3124         }
 3125 }
 3126 
 3127 void
 3128 mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
 3129 {
 3130 
 3131         MPT_2_HOST32(rpd0, Reserved1);
 3132         MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
 3133         MPT_2_HOST32(rpd0, MaxLBA);
 3134         MPT_2_HOST16(rpd0, ErrorData.Reserved);
 3135         MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
 3136         MPT_2_HOST16(rpd0, ErrorData.SmartCount);
 3137 }
 3138 
 3139 void
 3140 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
 3141 {
 3142 
 3143         MPT_2_HOST16(vi, TotalBlocks.High);
 3144         MPT_2_HOST16(vi, TotalBlocks.Low);
 3145         MPT_2_HOST16(vi, BlocksRemaining.High);
 3146         MPT_2_HOST16(vi, BlocksRemaining.Low);
 3147 }
 3148 #endif

Cache object: a9ec88e660e4c32345e1fdf8ee811cef


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.