FreeBSD/Linux Kernel Cross Reference
sys/dev/mpt/mpt.c


    1 /*-
    2  * Generic routines for LSI Fusion adapters.
    3  * FreeBSD Version.
    4  *
    5  * Copyright (c) 2000, 2001 by Greg Ansley
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice immediately at the beginning of the file, without modification,
   12  *    this list of conditions, and the following disclaimer.
   13  * 2. The name of the author may not be used to endorse or promote products
   14  *    derived from this software without specific prior written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 /*-
   29  * Copyright (c) 2002, 2006 by Matthew Jacob
   30  * All rights reserved.
   31  *
   32  * Redistribution and use in source and binary forms, with or without
   33  * modification, are permitted provided that the following conditions are
   34  * met:
   35  * 1. Redistributions of source code must retain the above copyright
   36  *    notice, this list of conditions and the following disclaimer.
   37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   38  *    substantially similar to the "NO WARRANTY" disclaimer below
   39  *    ("Disclaimer") and any redistribution must be conditioned upon including
   40  *    a substantially similar Disclaimer requirement for further binary
   41  *    redistribution.
   42  * 3. Neither the names of the above listed copyright holders nor the names
   43  *    of any contributors may be used to endorse or promote products derived
   44  *    from this software without specific prior written permission.
   45  *
   46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   57  *
   58  * Support from Chris Ellsworth in order to make SAS adapters work
   59  * is gratefully acknowledged.
   60  *
   61  *
   62  * Support from LSI-Logic has also gone a long way toward making this a
   63  * workable subsystem and is gratefully acknowledged.
   64  */
   65 /*-
   66  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
   67  * Copyright (c) 2005, WHEEL Sp. z o.o.
   68  * Copyright (c) 2004, 2005 Justin T. Gibbs
   69  * All rights reserved.
   70  *
   71  * Redistribution and use in source and binary forms, with or without
   72  * modification, are permitted provided that the following conditions are
   73  * met:
   74  * 1. Redistributions of source code must retain the above copyright
   75  *    notice, this list of conditions and the following disclaimer.
   76  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   77  *    substantially similar to the "NO WARRANTY" disclaimer below
   78  *    ("Disclaimer") and any redistribution must be conditioned upon including
   79  *    a substantially similar Disclaimer requirement for further binary
   80  *    redistribution.
   81  * 3. Neither the names of the above listed copyright holders nor the names
   82  *    of any contributors may be used to endorse or promote products derived
   83  *    from this software without specific prior written permission.
   84  *
   85  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   86  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   87  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   88  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   89  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   90  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   91  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   92  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   93  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   94  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
   95  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   96  */
   97 
   98 #include <sys/cdefs.h>
   99 __FBSDID("$FreeBSD: releng/8.3/sys/dev/mpt/mpt.c 231630 2012-02-14 01:15:26Z marius $");
  100 
  101 #include <dev/mpt/mpt.h>
  102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
  103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
  104 
  105 #include <dev/mpt/mpilib/mpi.h>
  106 #include <dev/mpt/mpilib/mpi_ioc.h>
  107 #include <dev/mpt/mpilib/mpi_fc.h>
  108 #include <dev/mpt/mpilib/mpi_targ.h>
  109 
  110 #include <sys/sysctl.h>
  111 
  112 #define MPT_MAX_TRYS 3
  113 #define MPT_MAX_WAIT 300000
  114 
  115 static int maxwait_ack = 0;
  116 static int maxwait_int = 0;
  117 static int maxwait_state = 0;
  118 
  119 static TAILQ_HEAD(, mpt_softc)  mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
  120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
  121 
  122 static mpt_reply_handler_t mpt_default_reply_handler;
  123 static mpt_reply_handler_t mpt_config_reply_handler;
  124 static mpt_reply_handler_t mpt_handshake_reply_handler;
  125 static mpt_reply_handler_t mpt_event_reply_handler;
  126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  127                                MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
  128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
  129 static int mpt_soft_reset(struct mpt_softc *mpt);
  130 static void mpt_hard_reset(struct mpt_softc *mpt);
  131 static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
  132 static void mpt_dma_buf_free(struct mpt_softc *mpt);
  133 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
  134 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
  135 
  136 /************************* Personality Module Support *************************/
  137 /*
  138  * We include one extra entry that is guaranteed to be NULL
  139  * to simplify our iterator.
  140  */
  141 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
  142 static __inline struct mpt_personality*
  143         mpt_pers_find(struct mpt_softc *, u_int);
  144 static __inline struct mpt_personality*
  145         mpt_pers_find_reverse(struct mpt_softc *, u_int);
  146 
  147 static __inline struct mpt_personality *
  148 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
  149 {
  150         KASSERT(start_at <= MPT_MAX_PERSONALITIES,
  151                 ("mpt_pers_find: starting position out of range"));
  152 
  153         while (start_at < MPT_MAX_PERSONALITIES
  154             && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
  155                 start_at++;
  156         }
  157         return (mpt_personalities[start_at]);
  158 }
  159 
  160 /*
  161  * Used infrequently, so no need to optimize like a forward
  162  * traversal, where we rely on the trick that entry MAX+1
  163  * is guaranteed to be NULL.
  164  */
  165 static __inline struct mpt_personality *
  166 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
  167 {
  168         while (start_at < MPT_MAX_PERSONALITIES
  169             && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
  170                 start_at--;
  171         }
  172         if (start_at < MPT_MAX_PERSONALITIES)
  173                 return (mpt_personalities[start_at]);
  174         return (NULL);
  175 }
  176 
  177 #define MPT_PERS_FOREACH(mpt, pers)                             \
  178         for (pers = mpt_pers_find(mpt, /*start_at*/0);          \
  179              pers != NULL;                                      \
  180              pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
  181 
  182 #define MPT_PERS_FOREACH_REVERSE(mpt, pers)                             \
  183         for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
  184              pers != NULL;                                              \
  185              pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
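
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * MPT_PERS_FOREACH visits only the personalities whose bit is set in
 * mpt->mpt_pers_mask; mpt_reset() below uses it to broadcast a reset
 * notification to every attached personality:
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->reset(mpt, ret);
 *
 * The reverse variant exists so that teardown paths can detach
 * personalities in the opposite order from attachment.
 */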
  186 
  187 static mpt_load_handler_t      mpt_stdload;
  188 static mpt_probe_handler_t     mpt_stdprobe;
  189 static mpt_attach_handler_t    mpt_stdattach;
  190 static mpt_enable_handler_t    mpt_stdenable;
  191 static mpt_ready_handler_t     mpt_stdready;
  192 static mpt_event_handler_t     mpt_stdevent;
  193 static mpt_reset_handler_t     mpt_stdreset;
  194 static mpt_shutdown_handler_t  mpt_stdshutdown;
  195 static mpt_detach_handler_t    mpt_stddetach;
  196 static mpt_unload_handler_t    mpt_stdunload;
  197 static struct mpt_personality mpt_default_personality =
  198 {
  199         .load           = mpt_stdload,
  200         .probe          = mpt_stdprobe,
  201         .attach         = mpt_stdattach,
  202         .enable         = mpt_stdenable,
  203         .ready          = mpt_stdready,
  204         .event          = mpt_stdevent,
  205         .reset          = mpt_stdreset,
  206         .shutdown       = mpt_stdshutdown,
  207         .detach         = mpt_stddetach,
  208         .unload         = mpt_stdunload
  209 };
  210 
  211 static mpt_load_handler_t      mpt_core_load;
  212 static mpt_attach_handler_t    mpt_core_attach;
  213 static mpt_enable_handler_t    mpt_core_enable;
  214 static mpt_reset_handler_t     mpt_core_ioc_reset;
  215 static mpt_event_handler_t     mpt_core_event;
  216 static mpt_shutdown_handler_t  mpt_core_shutdown;
  217 static mpt_shutdown_handler_t  mpt_core_detach;
  218 static mpt_unload_handler_t    mpt_core_unload;
  219 static struct mpt_personality mpt_core_personality =
  220 {
  221         .name           = "mpt_core",
  222         .load           = mpt_core_load,
  223 //      .attach         = mpt_core_attach,
  224 //      .enable         = mpt_core_enable,
  225         .event          = mpt_core_event,
  226         .reset          = mpt_core_ioc_reset,
  227         .shutdown       = mpt_core_shutdown,
  228         .detach         = mpt_core_detach,
  229         .unload         = mpt_core_unload,
  230 };
  231 
  232 /*
  233  * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
  234  * ordering information.  We want the core to always register FIRST;
  235  * other modules are set to SI_ORDER_SECOND.
  236  */
  237 static moduledata_t mpt_core_mod = {
  238         "mpt_core", mpt_modevent, &mpt_core_personality
  239 };
  240 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
  241 MODULE_VERSION(mpt_core, 1);
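
/*
 * Editor's note: a hedged sketch of how a non-core personality is
 * expected to register, assuming DECLARE_MPT_PERSONALITY expands to a
 * DECLARE_MODULE/MODULE_VERSION pair as the comment above implies; the
 * mpt_foo names are hypothetical:
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name   = "mpt_foo",
 *		.attach = mpt_foo_attach,
 *		.detach = mpt_foo_detach,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
 *
 * mpt_modevent() assigns the personality a slot on MOD_LOAD and fills
 * any NULL handlers with the mpt_std* no-ops declared below.
 */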
  242 
  243 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
  244 
  245 int
  246 mpt_modevent(module_t mod, int type, void *data)
  247 {
  248         struct mpt_personality *pers;
  249         int error;
  250 
  251         pers = (struct mpt_personality *)data;
  252 
  253         error = 0;
  254         switch (type) {
  255         case MOD_LOAD:
  256         {
  257                 mpt_load_handler_t **def_handler;
  258                 mpt_load_handler_t **pers_handler;
  259                 int i;
  260 
  261                 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
  262                         if (mpt_personalities[i] == NULL)
  263                                 break;
  264                 }
  265                 if (i >= MPT_MAX_PERSONALITIES) {
  266                         error = ENOMEM;
  267                         break;
  268                 }
  269                 pers->id = i;
  270                 mpt_personalities[i] = pers;
  271 
  272                 /* Install standard/noop handlers for any NULL entries. */
  273                 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
  274                 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
  275                 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
  276                         if (*pers_handler == NULL)
  277                                 *pers_handler = *def_handler;
  278                         pers_handler++;
  279                         def_handler++;
  280                 }
  281                 
  282                 error = (pers->load(pers));
  283                 if (error != 0)
  284                         mpt_personalities[i] = NULL;
  285                 break;
  286         }
  287         case MOD_SHUTDOWN:
  288                 break;
  289 #if __FreeBSD_version >= 500000
  290         case MOD_QUIESCE:
  291                 break;
  292 #endif
  293         case MOD_UNLOAD:
  294                 error = pers->unload(pers);
  295                 mpt_personalities[pers->id] = NULL;
  296                 break;
  297         default:
  298                 error = EINVAL;
  299                 break;
  300         }
  301         return (error);
  302 }
  303 
  304 static int
  305 mpt_stdload(struct mpt_personality *pers)
  306 {
  307 
  308         /* Load is always successful. */
  309         return (0);
  310 }
  311 
  312 static int
  313 mpt_stdprobe(struct mpt_softc *mpt)
  314 {
  315 
  316         /* Probe is always successful. */
  317         return (0);
  318 }
  319 
  320 static int
  321 mpt_stdattach(struct mpt_softc *mpt)
  322 {
  323 
  324         /* Attach is always successful. */
  325         return (0);
  326 }
  327 
  328 static int
  329 mpt_stdenable(struct mpt_softc *mpt)
  330 {
  331 
  332         /* Enable is always successful. */
  333         return (0);
  334 }
  335 
  336 static void
  337 mpt_stdready(struct mpt_softc *mpt)
  338 {
  339 
  340 }
  341 
  342 static int
  343 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
  344 {
  345 
  346         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
  347         /* Event was not for us. */
  348         return (0);
  349 }
  350 
  351 static void
  352 mpt_stdreset(struct mpt_softc *mpt, int type)
  353 {
  354 
  355 }
  356 
  357 static void
  358 mpt_stdshutdown(struct mpt_softc *mpt)
  359 {
  360 
  361 }
  362 
  363 static void
  364 mpt_stddetach(struct mpt_softc *mpt)
  365 {
  366 
  367 }
  368 
  369 static int
  370 mpt_stdunload(struct mpt_personality *pers)
  371 {
  372 
  373         /* Unload is always successful. */
  374         return (0);
  375 }
  376 
  377 /*
  378  * Post driver attachment, we may want to perform some global actions.
  379  * Here is the hook to do so.
  380  */
  381 
  382 static void
  383 mpt_postattach(void *unused)
  384 {
  385         struct mpt_softc *mpt;
  386         struct mpt_personality *pers;
  387 
  388         TAILQ_FOREACH(mpt, &mpt_tailq, links) {
  389                 MPT_PERS_FOREACH(mpt, pers)
  390                         pers->ready(mpt);
  391         }
  392 }
  393 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
  394 
  395 /******************************* Bus DMA Support ******************************/
  396 void
  397 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  398 {
  399         struct mpt_map_info *map_info;
  400 
  401         map_info = (struct mpt_map_info *)arg;
  402         map_info->error = error;
  403         map_info->phys = segs->ds_addr;
  404 }
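
/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * mpt_map_rquest() is the callback form bus_dmamap_load() expects for a
 * single-segment load; a caller passes a struct mpt_map_info and reads
 * the bus address (and any error) back out of it afterwards.  The
 * reply-area names below are illustrative:
 *
 *	struct mpt_map_info mi;
 *
 *	mi.mpt = mpt;
 *	error = bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap,
 *	    mpt->reply, PAGE_SIZE, mpt_map_rquest, &mi, 0);
 *	if (error == 0 && mi.error == 0)
 *		mpt->reply_phys = mi.phys;
 */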
  405 
  406 /**************************** Reply/Event Handling ****************************/
  407 int
  408 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
  409                      mpt_handler_t handler, uint32_t *phandler_id)
  410 {
  411 
  412         switch (type) {
  413         case MPT_HANDLER_REPLY:
  414         {
  415                 u_int cbi;
  416                 u_int free_cbi;
  417 
  418                 if (phandler_id == NULL)
  419                         return (EINVAL);
  420 
  421                 free_cbi = MPT_HANDLER_ID_NONE;
  422                 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
  423                         /*
  424                          * If the same handler is registered multiple
  425                          * times, don't error out.  Just return the
  426                          * index of the original registration.
  427                          */
  428                         if (mpt_reply_handlers[cbi] == handler.reply_handler) {
  429                                 *phandler_id = MPT_CBI_TO_HID(cbi);
  430                                 return (0);
  431                         }
  432 
  433                         /*
  434                          * Fill from the front in the hope that
  435                          * all registered handlers consume only a
  436                          * single cache line.
  437                          *
  438                          * We don't break on the first empty slot so
  439                          * that the full table is checked to see if
  440                          * this handler was previously registered.
  441                          */
  442                         if (free_cbi == MPT_HANDLER_ID_NONE &&
  443                             (mpt_reply_handlers[cbi]
  444                           == mpt_default_reply_handler))
  445                                 free_cbi = cbi;
  446                 }
  447                 if (free_cbi == MPT_HANDLER_ID_NONE) {
  448                         return (ENOMEM);
  449                 }
  450                 mpt_reply_handlers[free_cbi] = handler.reply_handler;
  451                 *phandler_id = MPT_CBI_TO_HID(free_cbi);
  452                 break;
  453         }
  454         default:
  455                 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
  456                 return (EINVAL);
  457         }
  458         return (0);
  459 }
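
/*
 * Editor's note: a hedged usage sketch.  A personality typically
 * registers its reply handler once at attach time and keeps the
 * returned id, which is later folded into each request's MsgContext so
 * that mpt_intr() can route replies back to it (mpt_foo_reply_handler
 * and the locals are hypothetical):
 *
 *	mpt_handler_t handler;
 *	uint32_t      handler_id;
 *	int           error;
 *
 *	handler.reply_handler = mpt_foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *
 * mpt_deregister_handler() reverses this with the same tuple.
 */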
  460 
  461 int
  462 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
  463                        mpt_handler_t handler, uint32_t handler_id)
  464 {
  465 
  466         switch (type) {
  467         case MPT_HANDLER_REPLY:
  468         {
  469                 u_int cbi;
  470 
  471                 cbi = MPT_CBI(handler_id);
  472                 if (cbi >= MPT_NUM_REPLY_HANDLERS
  473                  || mpt_reply_handlers[cbi] != handler.reply_handler)
  474                         return (ENOENT);
  475                 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
  476                 break;
  477         }
  478         default:
  479                 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
  480                 return (EINVAL);
  481         }
  482         return (0);
  483 }
  484 
  485 static int
  486 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
  487         uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  488 {
  489 
  490         mpt_prt(mpt,
  491             "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
  492             req, req->serno, reply_desc, reply_frame);
  493 
  494         if (reply_frame != NULL)
  495                 mpt_dump_reply_frame(mpt, reply_frame);
  496 
  497         mpt_prt(mpt, "Reply Frame Ignored\n");
  498 
  499         return (/*free_reply*/TRUE);
  500 }
  501 
  502 static int
  503 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
  504  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  505 {
  506 
  507         if (req != NULL) {
  508                 if (reply_frame != NULL) {
  509                         MSG_CONFIG *cfgp;
  510                         MSG_CONFIG_REPLY *reply;
  511 
  512                         cfgp = (MSG_CONFIG *)req->req_vbuf;
  513                         reply = (MSG_CONFIG_REPLY *)reply_frame;
  514                         req->IOCStatus = le16toh(reply_frame->IOCStatus);
  515                         bcopy(&reply->Header, &cfgp->Header,
  516                               sizeof(cfgp->Header));
  517                         cfgp->ExtPageLength = reply->ExtPageLength;
  518                         cfgp->ExtPageType = reply->ExtPageType;
  519                 }
  520                 req->state &= ~REQ_STATE_QUEUED;
  521                 req->state |= REQ_STATE_DONE;
  522                 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
  523                 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
  524                         wakeup(req);
  525                 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
  526                         /*
  527                          * Whew- we can free this request (late completion)
  528                          */
  529                         mpt_free_request(mpt, req);
  530                 }
  531         }
  532 
  533         return (TRUE);
  534 }
  535 
  536 static int
  537 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
  538  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  539 {
  540 
  541         /* Nothing to be done. */
  542         return (TRUE);
  543 }
  544 
  545 static int
  546 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
  547     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
  548 {
  549         int free_reply;
  550 
  551         KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
  552         KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
  553 
  554         free_reply = TRUE;
  555         switch (reply_frame->Function) {
  556         case MPI_FUNCTION_EVENT_NOTIFICATION:
  557         {
  558                 MSG_EVENT_NOTIFY_REPLY *msg;
  559                 struct mpt_personality *pers;
  560                 u_int handled;
  561 
  562                 handled = 0;
  563                 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  564                 msg->EventDataLength = le16toh(msg->EventDataLength);
  565                 msg->IOCStatus = le16toh(msg->IOCStatus);
  566                 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
  567                 msg->Event = le32toh(msg->Event);
  568                 MPT_PERS_FOREACH(mpt, pers)
  569                         handled += pers->event(mpt, req, msg);
  570 
  571                 if (handled == 0 && mpt->mpt_pers_mask == 0) {
  572                         mpt_lprt(mpt, MPT_PRT_INFO,
  573                                 "No Handlers For Any Event Notify Frames. "
  574                                 "Event %#x (ACK %sequired).\n",
  575                                 msg->Event, msg->AckRequired? "r" : "not r");
  576                 } else if (handled == 0) {
  577                         mpt_lprt(mpt,
  578                                 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
  579                                 "Unhandled Event Notify Frame. Event %#x "
  580                                 "(ACK %sequired).\n",
  581                                 msg->Event, msg->AckRequired? "r" : "not r");
  582                 }
  583 
  584                 if (msg->AckRequired) {
  585                         request_t *ack_req;
  586                         uint32_t context;
  587 
  588                         context = req->index | MPT_REPLY_HANDLER_EVENTS;
  589                         ack_req = mpt_get_request(mpt, FALSE);
  590                         if (ack_req == NULL) {
  591                                 struct mpt_evtf_record *evtf;
  592 
  593                                 evtf = (struct mpt_evtf_record *)reply_frame;
  594                                 evtf->context = context;
  595                                 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
  596                                 free_reply = FALSE;
  597                                 break;
  598                         }
  599                         mpt_send_event_ack(mpt, ack_req, msg, context);
  600                         /*
  601                          * Don't check for CONTINUATION_REPLY here
  602                          */
  603                         return (free_reply);
  604                 }
  605                 break;
  606         }
  607         case MPI_FUNCTION_PORT_ENABLE:
  608                 mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
  609                 break;
  610         case MPI_FUNCTION_EVENT_ACK:
  611                 break;
  612         default:
  613                 mpt_prt(mpt, "unknown event function: %x\n",
  614                         reply_frame->Function);
  615                 break;
  616         }
  617 
  618         /*
  619          * I'm not sure that this continuation stuff works as it should.
  620          *
  621          * I've had FC async events occur that free the frame up because
  622          * the continuation bit isn't set, and then additional async events
  623          * then occur using the same context. As you might imagine, this
  624  * leads to Very Bad Things.
  625  *
  626  * Let's just be safe for now and not free them up until we figure
  627          * out what's actually happening here.
  628          */
  629 #if     0
  630         if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
  631                 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
  632                 mpt_free_request(mpt, req);
  633                 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
  634                     reply_frame->Function, req, req->serno);
  635                 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
  636                         MSG_EVENT_NOTIFY_REPLY *msg =
  637                             (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  638                         mpt_prtc(mpt, " Event=0x%x AckReq=%d",
  639                             msg->Event, msg->AckRequired);
  640                 }
  641         } else {
  642                 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
  643                     reply_frame->Function, req, req->serno);
  644                 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
  645                         MSG_EVENT_NOTIFY_REPLY *msg =
  646                             (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
  647                         mpt_prtc(mpt, " Event=0x%x AckReq=%d",
  648                             msg->Event, msg->AckRequired);
  649                 }
  650                 mpt_prtc(mpt, "\n");
  651         }
  652 #endif
  653         return (free_reply);
  654 }
  655 
  656 /*
  657  * Process an asynchronous event from the IOC.
  658  */
  659 static int
  660 mpt_core_event(struct mpt_softc *mpt, request_t *req,
  661                MSG_EVENT_NOTIFY_REPLY *msg)
  662 {
  663 
  664         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
  665                  msg->Event & 0xFF);
  666         switch(msg->Event & 0xFF) {
  667         case MPI_EVENT_NONE:
  668                 break;
  669         case MPI_EVENT_LOG_DATA:
  670         {
  671                 int i;
  672 
  673                 /* Some error occurred that LSI wants logged */
  674                 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
  675                         msg->IOCLogInfo);
  676                 mpt_prt(mpt, "\tEvtLogData: Event Data:");
  677                 for (i = 0; i < msg->EventDataLength; i++)
  678                         mpt_prtc(mpt, "  %08x", msg->Data[i]);
  679                 mpt_prtc(mpt, "\n");
  680                 break;
  681         }
  682         case MPI_EVENT_EVENT_CHANGE:
  683                 /*
  684                  * This is just an acknowledgement
  685                  * of our mpt_send_event_request.
  686                  */
  687                 break;
  688         case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
  689                 break;
  690         default:
  691                 return (0);
  692                 break;
  693         }
  694         return (1);
  695 }
  696 
  697 static void
  698 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
  699                    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
  700 {
  701         MSG_EVENT_ACK *ackp;
  702 
  703         ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
  704         memset(ackp, 0, sizeof (*ackp));
  705         ackp->Function = MPI_FUNCTION_EVENT_ACK;
  706         ackp->Event = htole32(msg->Event);
  707         ackp->EventContext = htole32(msg->EventContext);
  708         ackp->MsgContext = htole32(context);
  709         mpt_check_doorbell(mpt);
  710         mpt_send_cmd(mpt, ack_req);
  711 }
  712 
  713 /***************************** Interrupt Handling *****************************/
  714 void
  715 mpt_intr(void *arg)
  716 {
  717         struct mpt_softc *mpt;
  718         uint32_t reply_desc;
  719         int ntrips = 0;
  720 
  721         mpt = (struct mpt_softc *)arg;
  722         mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
  723         MPT_LOCK_ASSERT(mpt);
  724 
  725         while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
  726                 request_t         *req;
  727                 MSG_DEFAULT_REPLY *reply_frame;
  728                 uint32_t           reply_baddr;
  729                 uint32_t           ctxt_idx;
  730                 u_int              cb_index;
  731                 u_int              req_index;
  732                 u_int              offset;
  733                 int                free_rf;
  734 
  735                 req = NULL;
  736                 reply_frame = NULL;
  737                 reply_baddr = 0;
  738                 offset = 0;
  739                 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
  740                         /*
  741                          * Ensure that the reply frame is coherent.
  742                          */
  743                         reply_baddr = MPT_REPLY_BADDR(reply_desc);
  744                         offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
  745                         bus_dmamap_sync_range(mpt->reply_dmat,
  746                             mpt->reply_dmap, offset, MPT_REPLY_SIZE,
  747                             BUS_DMASYNC_POSTREAD);
  748                         reply_frame = MPT_REPLY_OTOV(mpt, offset);
  749                         ctxt_idx = le32toh(reply_frame->MsgContext);
  750                 } else {
  751                         uint32_t type;
  752 
  753                         type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
  754                         ctxt_idx = reply_desc;
  755                         mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
  756                                     reply_desc);
  757 
  758                         switch (type) {
  759                         case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
  760                                 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
  761                                 break;
  762                         case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
  763                                 ctxt_idx = GET_IO_INDEX(reply_desc);
  764                                 if (mpt->tgt_cmd_ptrs == NULL) {
  765                                         mpt_prt(mpt,
  766                                             "mpt_intr: no target cmd ptrs\n");
  767                                         reply_desc = MPT_REPLY_EMPTY;
  768                                         break;
  769                                 }
  770                                 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
  771                                         mpt_prt(mpt,
  772                                             "mpt_intr: bad tgt cmd ctxt %u\n",
  773                                             ctxt_idx);
  774                                         reply_desc = MPT_REPLY_EMPTY;
  775                                         ntrips = 1000;
  776                                         break;
  777                                 }
  778                                 req = mpt->tgt_cmd_ptrs[ctxt_idx];
  779                                 if (req == NULL) {
  780                                         mpt_prt(mpt, "no request backpointer "
  781                                             "at index %u", ctxt_idx);
  782                                         reply_desc = MPT_REPLY_EMPTY;
  783                                         ntrips = 1000;
  784                                         break;
  785                                 }
  786                                 /*
  787                                  * Reformulate ctxt_idx to be just as if
  788                                  * it were another type of context reply
  789                                  * so the code below will find the request
  790                                  * via indexing into the pool.
  791                                  */
  792                                 ctxt_idx =
  793                                     req->index | mpt->scsi_tgt_handler_id;
  794                                 req = NULL;
  795                                 break;
  796                         case MPI_CONTEXT_REPLY_TYPE_LAN:
  797                                 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
  798                                     reply_desc);
  799                                 reply_desc = MPT_REPLY_EMPTY;
  800                                 break;
  801                         default:
  802                                 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
  803                                 reply_desc = MPT_REPLY_EMPTY;
  804                                 break;
  805                         }
  806                         if (reply_desc == MPT_REPLY_EMPTY) {
  807                                 if (ntrips++ > 1000) {
  808                                         break;
  809                                 }
  810                                 continue;
  811                         }
  812                 }
  813 
  814                 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
  815                 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
  816                 if (req_index < MPT_MAX_REQUESTS(mpt)) {
  817                         req = &mpt->request_pool[req_index];
  818                 } else {
  819                         mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
  820                             " 0x%x)\n", req_index, reply_desc);
  821                 }
  822 
  823                 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
  824                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  825                 free_rf = mpt_reply_handlers[cb_index](mpt, req,
  826                     reply_desc, reply_frame);
  827 
  828                 if (reply_frame != NULL && free_rf) {
  829                         bus_dmamap_sync_range(mpt->reply_dmat,
  830                             mpt->reply_dmap, offset, MPT_REPLY_SIZE,
  831                             BUS_DMASYNC_PREREAD);
  832                         mpt_free_reply(mpt, reply_baddr);
  833                 }
  834 
  835                 /*
  836                  * If we got ourselves disabled, don't get stuck in a loop
  837                  */
  838                 if (mpt->disabled) {
  839                         mpt_disable_ints(mpt);
  840                         break;
  841                 }
  842                 if (ntrips++ > 1000) {
  843                         break;
  844                 }
  845         }
  846         mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
  847 }
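
/*
 * Editor's note: a brief sketch of the MsgContext layout the dispatch
 * above depends on.  The callback index of the owning reply handler is
 * folded into the upper bits of the request index when a request is
 * queued, and split back apart here:
 *
 *	ctxt_idx  = req->index | handler_id;       when queueing
 *	cb_index  = MPT_CONTEXT_TO_CBI(ctxt_idx);  selects the handler
 *	req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); selects the request_t
 */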
  848 
  849 /******************************* Error Recovery *******************************/
  850 void
  851 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
  852                             u_int iocstatus)
  853 {
  854         MSG_DEFAULT_REPLY  ioc_status_frame;
  855         request_t         *req;
  856 
  857         memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
  858         ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
  859         ioc_status_frame.IOCStatus = iocstatus;
  860         while((req = TAILQ_FIRST(chain)) != NULL) {
  861                 MSG_REQUEST_HEADER *msg_hdr;
  862                 u_int               cb_index;
  863 
  864                 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
  865                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  866                 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
  867                 ioc_status_frame.Function = msg_hdr->Function;
  868                 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
  869                 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
  870                 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
  871                     &ioc_status_frame);
  872                 if (mpt_req_on_pending_list(mpt, req) != 0)
  873                         TAILQ_REMOVE(chain, req, links);
  874         }
  875 }
  876 
  877 /********************************* Diagnostics ********************************/
  878 /*
  879  * Perform a diagnostic dump of a reply frame.
  880  */
  881 void
  882 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
  883 {
  884 
  885         mpt_prt(mpt, "Address Reply:\n");
  886         mpt_print_reply(reply_frame);
  887 }
  888 
  889 /******************************* Doorbell Access ******************************/
  890 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
  891 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
  892 
  893 static __inline uint32_t
  894 mpt_rd_db(struct mpt_softc *mpt)
  895 {
  896 
  897         return mpt_read(mpt, MPT_OFFSET_DOORBELL);
  898 }
  899 
  900 static __inline uint32_t
  901 mpt_rd_intr(struct mpt_softc *mpt)
  902 {
  903 
  904         return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
  905 }
  906 
  907 /* Busy wait for a doorbell to be read by the IOC */
  908 static int
  909 mpt_wait_db_ack(struct mpt_softc *mpt)
  910 {
  911         int i;
  912 
  913         for (i=0; i < MPT_MAX_WAIT; i++) {
  914                 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
  915                         maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
  916                         return (MPT_OK);
  917                 }
  918                 DELAY(200);
  919         }
  920         return (MPT_FAIL);
  921 }
  922 
  923 /* Busy wait for a doorbell interrupt */
  924 static int
  925 mpt_wait_db_int(struct mpt_softc *mpt)
  926 {
  927         int i;
  928 
  929         for (i = 0; i < MPT_MAX_WAIT; i++) {
  930                 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
  931                         maxwait_int = i > maxwait_int ? i : maxwait_int;
  932                         return MPT_OK;
  933                 }
  934                 DELAY(100);
  935         }
  936         return (MPT_FAIL);
  937 }
  938 
  939 /* Check that the IOC is in the running state */
  940 void
  941 mpt_check_doorbell(struct mpt_softc *mpt)
  942 {
  943         uint32_t db = mpt_rd_db(mpt);
  944 
  945         if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
  946                 mpt_prt(mpt, "Device not running\n");
  947                 mpt_print_db(db);
  948         }
  949 }
  950 
  951 /* Wait for the IOC to transition to a given state */
  952 static int
  953 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
  954 {
  955         int i;
  956 
  957         for (i = 0; i < MPT_MAX_WAIT; i++) {
  958                 uint32_t db = mpt_rd_db(mpt);
  959                 if (MPT_STATE(db) == state) {
  960                         maxwait_state = i > maxwait_state ? i : maxwait_state;
  961                         return (MPT_OK);
  962                 }
  963                 DELAY(100);
  964         }
  965         return (MPT_FAIL);
  966 }
  967 
  968 
  969 /************************ Initialization/Configuration ************************/
  970 static int mpt_download_fw(struct mpt_softc *mpt);
  971 
  972 /* Issue the reset COMMAND to the IOC */
  973 static int
  974 mpt_soft_reset(struct mpt_softc *mpt)
  975 {
  976 
  977         mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
  978 
  979         /* Have to use hard reset if we are not in Running state */
  980         if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
  981                 mpt_prt(mpt, "soft reset failed: device not running\n");
  982                 return (MPT_FAIL);
  983         }
  984 
  985         /* If the doorbell is in use, we don't have a chance of getting
  986          * a word in, since the IOC probably crashed in message
  987          * processing.  So don't waste our time.
  988          */
  989         if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
  990                 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
  991                 return (MPT_FAIL);
  992         }
  993 
  994         /* Send the reset request to the IOC */
  995         mpt_write(mpt, MPT_OFFSET_DOORBELL,
  996             MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
  997         if (mpt_wait_db_ack(mpt) != MPT_OK) {
  998                 mpt_prt(mpt, "soft reset failed: ack timeout\n");
  999                 return (MPT_FAIL);
 1000         }
 1001 
 1002         /* Wait for the IOC to reload and come out of reset state */
 1003         if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
 1004                 mpt_prt(mpt, "soft reset failed: device did not restart\n");
 1005                 return (MPT_FAIL);
 1006         }
 1007 
 1008         return MPT_OK;
 1009 }
 1010 
 1011 static int
 1012 mpt_enable_diag_mode(struct mpt_softc *mpt)
 1013 {
 1014         int try;
 1015 
 1016         try = 20;
 1017         while (--try) {
 1018 
 1019                 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
 1020                         break;
 1021 
 1022                 /* Enable diagnostic registers */
 1023                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
 1024                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
 1025                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
 1026                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
 1027                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
 1028                 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
 1029 
 1030                 DELAY(100000);
 1031         }
 1032         if (try == 0)
 1033                 return (EIO);
 1034         return (0);
 1035 }
 1036 
 1037 static void
 1038 mpt_disable_diag_mode(struct mpt_softc *mpt)
 1039 {
 1040 
 1041         mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
 1042 }
 1043 
 1044 /* This is a magic diagnostic reset that resets all the ARM
 1045  * processors in the chip.
 1046  */
 1047 static void
 1048 mpt_hard_reset(struct mpt_softc *mpt)
 1049 {
 1050         int error;
 1051         int wait;
 1052         uint32_t diagreg;
 1053 
 1054         mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
 1055 
 1056         if (mpt->is_1078) {
 1057                 mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
 1058                 DELAY(1000);
 1059                 return;
 1060         }
 1061 
 1062         error = mpt_enable_diag_mode(mpt);
 1063         if (error) {
 1064                 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
 1065                 mpt_prt(mpt, "Trying to reset anyway.\n");
 1066         }
 1067 
 1068         diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 1069 
 1070         /*
 1071          * This appears to be a workaround required for some
 1072          * firmware or hardware revs.
 1073          */
 1074         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
 1075         DELAY(1000);
 1076 
 1077         /* Diag. port is now active, so we can hit the reset bit */
 1078         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
 1079 
 1080         /*
 1081          * Ensure that the reset has finished.  We delay 1ms
 1082          * prior to reading the register to make sure the chip
 1083          * has sufficiently completed its reset to handle register
 1084          * accesses.
 1085          */
 1086         wait = 5000;
 1087         do {
 1088                 DELAY(1000);
 1089                 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 1090         } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
 1091 
 1092         if (wait == 0) {
 1093                 mpt_prt(mpt, "WARNING - Failed hard reset! "
 1094                         "Trying to initialize anyway.\n");
 1095         }
 1096 
 1097         /*
 1098          * If we have firmware to download, it must be loaded before
 1099          * the controller will become operational.  Do so now.
 1100          */
 1101         if (mpt->fw_image != NULL) {
 1102 
 1103                 error = mpt_download_fw(mpt);
 1104 
 1105                 if (error) {
 1106                         mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
 1107                         mpt_prt(mpt, "Trying to initialize anyway.\n");
 1108                 }
 1109         }
 1110 
 1111         /*
 1112          * Resetting the controller should have disabled write
 1113          * access to the diagnostic registers, but disable
 1114          * manually to be sure.
 1115          */
 1116         mpt_disable_diag_mode(mpt);
 1117 }
 1118 
 1119 static void
 1120 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
 1121 {
 1122 
 1123         /*
 1124          * Complete all pending requests with a status
 1125          * appropriate for an IOC reset.
 1126          */
 1127         mpt_complete_request_chain(mpt, &mpt->request_pending_list,
 1128                                    MPI_IOCSTATUS_INVALID_STATE);
 1129 }
 1130 
 1131 /*
 1132  * Reset the IOC when needed.  Try the software command first; then,
 1133  * if needed, poke at the magic diagnostic reset.  Note that a hard
 1134  * reset resets *both* IOCs on dual-function chips (FC929 && LSI1030)
 1135  * and also fouls up the PCI configuration registers.
 1136  */
 1137 int
 1138 mpt_reset(struct mpt_softc *mpt, int reinit)
 1139 {
 1140         struct  mpt_personality *pers;
 1141         int     ret;
 1142         int     retry_cnt = 0;
 1143 
 1144         /*
 1145          * Try a soft reset. If that fails, get out the big hammer.
 1146          */
 1147  again:
 1148         if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
 1149                 int     cnt;
 1150                 for (cnt = 0; cnt < 5; cnt++) {
 1151                         /* Failed; do a hard reset */
 1152                         mpt_hard_reset(mpt);
 1153 
 1154                         /*
 1155                          * Wait for the IOC to reload
 1156                          * and come out of reset state
 1157                          */
 1158                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1159                         if (ret == MPT_OK) {
 1160                                 break;
 1161                         }
 1162                         /*
 1163                          * Okay- try to check again...
 1164                          */
 1165                         ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
 1166                         if (ret == MPT_OK) {
 1167                                 break;
 1168                         }
 1169                         mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
 1170                             retry_cnt, cnt);
 1171                 }
 1172         }
 1173 
 1174         if (retry_cnt == 0) {
 1175                 /*
 1176                  * Invoke reset handlers.  We bump the reset count so
 1177                  * that mpt_wait_req() understands that regardless of
 1178                  * the specified wait condition, it should stop its wait.
 1179                  */
 1180                 mpt->reset_cnt++;
 1181                 MPT_PERS_FOREACH(mpt, pers)
 1182                         pers->reset(mpt, ret);
 1183         }
 1184 
 1185         if (reinit) {
 1186                 ret = mpt_enable_ioc(mpt, 1);
 1187                 if (ret == MPT_OK) {
 1188                         mpt_enable_ints(mpt);
 1189                 }
 1190         }
 1191         if (ret != MPT_OK && retry_cnt++ < 2) {
 1192                 goto again;
 1193         }
 1194         return ret;
 1195 }
 1196 
 1197 /* Return a command buffer to the free queue */
 1198 void
 1199 mpt_free_request(struct mpt_softc *mpt, request_t *req)
 1200 {
 1201         request_t *nxt;
 1202         struct mpt_evtf_record *record;
 1203         uint32_t offset, reply_baddr;
 1204         
 1205         if (req == NULL || req != &mpt->request_pool[req->index]) {
 1206                 panic("mpt_free_request: bad req ptr");
 1207         }
 1208         if ((nxt = req->chain) != NULL) {
 1209                 req->chain = NULL;
 1210                 mpt_free_request(mpt, nxt);     /* NB: recursion */
 1211         }
 1212         KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
 1213         KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
 1214         MPT_LOCK_ASSERT(mpt);
 1215         KASSERT(mpt_req_on_free_list(mpt, req) == 0,
 1216             ("mpt_free_request: req %p:%u func %x already on freelist",
 1217             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1218         KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
 1219             ("mpt_free_request: req %p:%u func %x on pending list",
 1220             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1221 #ifdef  INVARIANTS
 1222         mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
 1223 #endif
 1224 
 1225         req->ccb = NULL;
 1226         if (LIST_EMPTY(&mpt->ack_frames)) {
 1227                 /*
 1228                  * Insert free ones at the tail
 1229                  */
 1230                 req->serno = 0;
 1231                 req->state = REQ_STATE_FREE;
 1232 #ifdef  INVARIANTS
 1233                 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
 1234 #endif
 1235                 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
 1236                 if (mpt->getreqwaiter != 0) {
 1237                         mpt->getreqwaiter = 0;
 1238                         wakeup(&mpt->request_free_list);
 1239                 }
 1240                 return;
 1241         }
 1242 
 1243         /*
 1244          * Process an ack frame deferred due to resource shortage.
 1245          */
 1246         record = LIST_FIRST(&mpt->ack_frames);
 1247         LIST_REMOVE(record, links);
 1248         req->state = REQ_STATE_ALLOCATED;
 1249         mpt_assign_serno(mpt, req);
 1250         mpt_send_event_ack(mpt, req, &record->reply, record->context);
 1251         offset = (uint32_t)((uint8_t *)record - mpt->reply);
 1252         reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
 1253         bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
 1254             MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
 1255         mpt_free_reply(mpt, reply_baddr);
 1256 }
 1257 
 1258 /* Get a command buffer from the free queue */
 1259 request_t *
 1260 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
 1261 {
 1262         request_t *req;
 1263 
 1264 retry:
 1265         MPT_LOCK_ASSERT(mpt);
 1266         req = TAILQ_FIRST(&mpt->request_free_list);
 1267         if (req != NULL) {
 1268                 KASSERT(req == &mpt->request_pool[req->index],
 1269                     ("mpt_get_request: corrupted request free list"));
 1270                 KASSERT(req->state == REQ_STATE_FREE,
 1271                     ("req %p:%u not free on free list %x index %d function %x",
 1272                     req, req->serno, req->state, req->index,
 1273                     ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1274                 TAILQ_REMOVE(&mpt->request_free_list, req, links);
 1275                 req->state = REQ_STATE_ALLOCATED;
 1276                 req->chain = NULL;
 1277                 mpt_assign_serno(mpt, req);
 1278         } else if (sleep_ok != 0) {
 1279                 mpt->getreqwaiter = 1;
 1280                 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
 1281                 goto retry;
 1282         }
 1283         return (req);
 1284 }
 1285 
 1286 /* Pass the command to the IOC */
 1287 void
 1288 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
 1289 {
 1290 
 1291         if (mpt->verbose > MPT_PRT_DEBUG2) {
 1292                 mpt_dump_request(mpt, req);
 1293         }
 1294         bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
 1295             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1296         req->state |= REQ_STATE_QUEUED;
 1297         KASSERT(mpt_req_on_free_list(mpt, req) == 0,
 1298             ("req %p:%u func %x on freelist list in mpt_send_cmd",
 1299             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1300         KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
 1301             ("req %p:%u func %x already on pending list in mpt_send_cmd",
 1302             req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
 1303         TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
 1304         mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
 1305 }
 1306 
 1307 /*
 1308  * Wait for a request to complete.
 1309  *
 1310  * Inputs:
 1311  *      mpt             softc of controller executing request
 1312  *      req             request to wait for
 1313  *      sleep_ok        nonzero implies may sleep in this context
 1314  *      time_ms         timeout in ms.  0 implies no timeout.
 1315  *
 1316  * Return Values:
 1317  *      0               Request completed
 1318  *      non-0           Timeout fired before request completion.
 1319  */
 1320 int
 1321 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
 1322              mpt_req_state_t state, mpt_req_state_t mask,
 1323              int sleep_ok, int time_ms)
 1324 {
 1325         int   error;
 1326         int   timeout;
 1327         u_int saved_cnt;
 1328 
 1329         /*
 1330          * timeout is in ms.  0 indicates infinite wait.
 1331          * Convert to ticks or 500us units depending on
 1332          * our sleep mode.
 1333          */
 1334         if (sleep_ok != 0) {
 1335                 timeout = (time_ms * hz) / 1000;
 1336         } else {
 1337                 timeout = time_ms * 2;
 1338         }
 1339         req->state |= REQ_STATE_NEED_WAKEUP;
 1340         mask &= ~REQ_STATE_NEED_WAKEUP;
 1341         saved_cnt = mpt->reset_cnt;
 1342         while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
 1343                 if (sleep_ok != 0) {
 1344                         error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
 1345                         if (error == EWOULDBLOCK) {
 1346                                 timeout = 0;
 1347                                 break;
 1348                         }
 1349                 } else {
 1350                         if (time_ms != 0 && --timeout == 0) {
 1351                                 break;
 1352                         }
 1353                         DELAY(500);
 1354                         mpt_intr(mpt);
 1355                 }
 1356         }
 1357         req->state &= ~REQ_STATE_NEED_WAKEUP;
 1358         if (mpt->reset_cnt != saved_cnt) {
 1359                 return (EIO);
 1360         }
 1361         if (time_ms && timeout <= 0) {
 1362                 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
 1363                 req->state |= REQ_STATE_TIMEDOUT;
 1364                 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
 1365                 return (ETIMEDOUT);
 1366         }
 1367         return (0);
 1368 }
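/*
 * Worked example of the timeout conversion above (editorial): with
 * time_ms = 5000 and hz = 1000, the sleeping path waits up to
 * (5000 * 1000) / 1000 = 5000 ticks, while the polling path spins
 * 5000 * 2 = 10000 iterations of DELAY(500), the same five seconds
 * in 500us units, calling mpt_intr() by hand because interrupt
 * delivery may not be available in that context.
 */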
 1369 
 1370 /*
 1371  * Send a command to the IOC via the handshake register.
 1372  *
 1373  * Only done at initialization time and for certain unusual
 1374  * commands such as device/bus reset as specified by LSI.
 1375  */
 1376 int
 1377 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
 1378 {
 1379         int i;
 1380         uint32_t data, *data32;
 1381 
 1382         /* Check condition of the IOC */
 1383         data = mpt_rd_db(mpt);
 1384         if ((MPT_STATE(data) != MPT_DB_STATE_READY
 1385           && MPT_STATE(data) != MPT_DB_STATE_RUNNING
 1386           && MPT_STATE(data) != MPT_DB_STATE_FAULT)
 1387          || MPT_DB_IS_IN_USE(data)) {
 1388                 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
 1389                 mpt_print_db(data);
 1390                 return (EBUSY);
 1391         }
 1392 
 1393         /* We move things in 32 bit chunks */
 1394         len = (len + 3) >> 2;
 1395         data32 = cmd;
 1396 
 1397         /* Clear any left over pending doorbell interrupts */
 1398         if (MPT_DB_INTR(mpt_rd_intr(mpt)))
 1399                 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1400 
 1401         /*
 1402          * Tell the handshake reg. we are going to send a command
 1403          * and how long it is going to be.
 1404          */
 1405         data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
 1406             (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
 1407         mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
 1408 
 1409         /* Wait for the chip to notice */
 1410         if (mpt_wait_db_int(mpt) != MPT_OK) {
 1411                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
 1412                 return (ETIMEDOUT);
 1413         }
 1414 
 1415         /* Clear the interrupt */
 1416         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1417 
 1418         if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1419                 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
 1420                 return (ETIMEDOUT);
 1421         }
 1422 
 1423         /* Send the command */
 1424         for (i = 0; i < len; i++) {
 1425                 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
 1426                 if (mpt_wait_db_ack(mpt) != MPT_OK) {
 1427                         mpt_prt(mpt,
 1428                             "mpt_send_handshake_cmd: timeout @ index %d\n", i);
 1429                         return (ETIMEDOUT);
 1430                 }
 1431         }
        return (MPT_OK);
 1433 }
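/*
 * Worked example of the handshake encoding above (editorial): a 12-byte
 * request rounds up to (12 + 3) >> 2 = 3 dwords, so the initial doorbell
 * write is
 *
 *      (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
 *      (3 << MPI_DOORBELL_ADD_DWORDS_SHIFT)
 *
 * after which each of the three dwords is written to MPT_OFFSET_DOORBELL,
 * waiting for the IOC to acknowledge every word.
 */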
 1434 
 1435 /* Get the response from the handshake register */
 1436 int
 1437 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
 1438 {
 1439         int left, reply_left;
 1440         u_int16_t *data16;
 1441         uint32_t data;
 1442         MSG_DEFAULT_REPLY *hdr;
 1443 
 1444         /* We move things out in 16 bit chunks */
 1445         reply_len >>= 1;
 1446         data16 = (u_int16_t *)reply;
 1447 
 1448         hdr = (MSG_DEFAULT_REPLY *)reply;
 1449 
 1450         /* Get first word */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_reply timeout1\n");
                return (ETIMEDOUT);
        }
 1455         data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1456         *data16++ = le16toh(data & MPT_DB_DATA_MASK);
 1457         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1458 
 1459         /* Get Second Word */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_reply timeout2\n");
                return (ETIMEDOUT);
        }
 1464         data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1465         *data16++ = le16toh(data & MPT_DB_DATA_MASK);
 1466         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1467 
 1468         /*
 1469          * With the second word, we can now look at the length.
 1470          * Warn about a reply that's too short (except for IOC FACTS REPLY)
 1471          */
        if ((reply_len >> 1) != hdr->MsgLength &&
            (hdr->Function != MPI_FUNCTION_IOC_FACTS)) {
 1474 #if __FreeBSD_version >= 500000
 1475                 mpt_prt(mpt, "reply length does not match message length: "
 1476                         "got %x; expected %zx for function %x\n",
 1477                         hdr->MsgLength << 2, reply_len << 1, hdr->Function);
 1478 #else
 1479                 mpt_prt(mpt, "reply length does not match message length: "
 1480                         "got %x; expected %x for function %x\n",
 1481                         hdr->MsgLength << 2, reply_len << 1, hdr->Function);
 1482 #endif
 1483         }
 1484 
 1485         /* Get rest of the reply; but don't overflow the provided buffer */
 1486         left = (hdr->MsgLength << 1) - 2;
        reply_left = reply_len - 2;
 1488         while (left--) {
 1489                 u_int16_t datum;
 1490 
                if (mpt_wait_db_int(mpt) != MPT_OK) {
                        mpt_prt(mpt, "mpt_recv_handshake_reply timeout3\n");
                        return (ETIMEDOUT);
                }
 1495                 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
 1496                 datum = le16toh(data & MPT_DB_DATA_MASK);
 1497 
 1498                 if (reply_left-- > 0)
 1499                         *data16++ = datum;
 1500 
 1501                 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1502         }
 1503 
 1504         /* One more wait & clear at the end */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_reply timeout4\n");
                return (ETIMEDOUT);
        }
 1509         mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
 1510 
 1511         if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1512                 if (mpt->verbose >= MPT_PRT_TRACE)
 1513                         mpt_print_reply(hdr);
 1514                 return (MPT_FAIL | hdr->IOCStatus);
 1515         }
 1516 
 1517         return (0);
 1518 }
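/*
 * Length bookkeeping in the routine above (editorial): reply_len is
 * halved to count 16-bit words, while hdr->MsgLength counts 32-bit
 * dwords, so (reply_len >> 1) compares like with like.  For example, a
 * 60-byte caller buffer gives reply_len = 30 words = 15 dwords; a reply
 * advertising more than 15 dwords is still drained from the doorbell,
 * but the excess words are dropped once reply_left reaches zero.
 */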
 1519 
 1520 static int
 1521 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
 1522 {
 1523         MSG_IOC_FACTS f_req;
 1524         int error;
 1525         
 1526         memset(&f_req, 0, sizeof f_req);
 1527         f_req.Function = MPI_FUNCTION_IOC_FACTS;
 1528         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1529         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1530         if (error) {
 1531                 return(error);
 1532         }
 1533         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1534         return (error);
 1535 }
 1536 
 1537 static int
 1538 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
 1539 {
 1540         MSG_PORT_FACTS f_req;
 1541         int error;
 1542         
 1543         memset(&f_req, 0, sizeof f_req);
 1544         f_req.Function = MPI_FUNCTION_PORT_FACTS;
 1545         f_req.PortNumber = port;
 1546         f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1547         error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
 1548         if (error) {
 1549                 return(error);
 1550         }
 1551         error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
 1552         return (error);
 1553 }
 1554 
 1555 /*
 1556  * Send the initialization request. This is where we specify how many
 1557  * SCSI busses and how many devices per bus we wish to emulate.
 1558  * This is also the command that specifies the max size of the reply
 1559  * frames from the IOC that we will be allocating.
 1560  */
 1561 static int
 1562 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
 1563 {
 1564         int error = 0;
 1565         MSG_IOC_INIT init;
 1566         MSG_IOC_INIT_REPLY reply;
 1567 
 1568         memset(&init, 0, sizeof init);
 1569         init.WhoInit = who;
 1570         init.Function = MPI_FUNCTION_IOC_INIT;
 1571         init.MaxDevices = 0;    /* at least 256 devices per bus */
 1572         init.MaxBuses = 16;     /* at least 16 busses */
 1573 
 1574         init.MsgVersion = htole16(MPI_VERSION);
 1575         init.HeaderVersion = htole16(MPI_HEADER_VERSION);
 1576         init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
 1577         init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 1578 
 1579         if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
 1580                 return(error);
 1581         }
 1582 
 1583         error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
 1584         return (error);
 1585 }
 1586 
 1587 
 1588 /*
 * Utility routine to read configuration headers and pages
 1590  */
 1591 int
 1592 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
 1593                   bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
 1594 {
 1595         MSG_CONFIG *cfgp;
 1596         SGE_SIMPLE32 *se;
 1597 
 1598         cfgp = req->req_vbuf;
 1599         memset(cfgp, 0, sizeof *cfgp);
 1600         cfgp->Action = params->Action;
 1601         cfgp->Function = MPI_FUNCTION_CONFIG;
 1602         cfgp->Header.PageVersion = params->PageVersion;
 1603         cfgp->Header.PageNumber = params->PageNumber;
 1604         cfgp->PageAddress = htole32(params->PageAddress);
 1605         if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
 1606             MPI_CONFIG_PAGETYPE_EXTENDED) {
 1607                 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1608                 cfgp->Header.PageLength = 0;
 1609                 cfgp->ExtPageLength = htole16(params->ExtPageLength);
 1610                 cfgp->ExtPageType = params->ExtPageType;
 1611         } else {
 1612                 cfgp->Header.PageType = params->PageType;
 1613                 cfgp->Header.PageLength = params->PageLength;
 1614         }
 1615         se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
 1616         se->Address = htole32(addr);
 1617         MPI_pSGE_SET_LENGTH(se, len);
 1618         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 1619             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
 1620             MPI_SGE_FLAGS_END_OF_LIST |
 1621             ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
 1622           || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
 1623            ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
 1624         se->FlagsLength = htole32(se->FlagsLength);
 1625         cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 1626 
 1627         mpt_check_doorbell(mpt);
 1628         mpt_send_cmd(mpt, req);
 1629         return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 1630                              sleep_ok, timeout_ms));
 1631 }
 1632 
 1633 int
 1634 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
 1635                        uint32_t PageAddress, int ExtPageType,
 1636                        CONFIG_EXTENDED_PAGE_HEADER *rslt,
 1637                        int sleep_ok, int timeout_ms)
 1638 {
 1639         request_t  *req;
 1640         cfgparms_t params;
 1641         MSG_CONFIG_REPLY *cfgp;
 1642         int         error;
 1643 
 1644         req = mpt_get_request(mpt, sleep_ok);
 1645         if (req == NULL) {
                mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
 1647                 return (ENOMEM);
 1648         }
 1649 
 1650         params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
 1651         params.PageVersion = PageVersion;
 1652         params.PageLength = 0;
 1653         params.PageNumber = PageNumber;
 1654         params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1655         params.PageAddress = PageAddress;
 1656         params.ExtPageType = ExtPageType;
 1657         params.ExtPageLength = 0;
 1658         error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
 1659                                   sleep_ok, timeout_ms);
 1660         if (error != 0) {
 1661                 /*
 1662                  * Leave the request. Without resetting the chip, it's
 1663                  * still owned by it and we'll just get into trouble
 1664                  * freeing it now. Mark it as abandoned so that if it
 1665                  * shows up later it can be freed.
 1666                  */
 1667                 mpt_prt(mpt, "read_extcfg_header timed out\n");
 1668                 return (ETIMEDOUT);
 1669         }
 1670 
 1671         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
 1672         case MPI_IOCSTATUS_SUCCESS:
 1673                 cfgp = req->req_vbuf;
 1674                 rslt->PageVersion = cfgp->Header.PageVersion;
 1675                 rslt->PageNumber = cfgp->Header.PageNumber;
 1676                 rslt->PageType = cfgp->Header.PageType;
 1677                 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
 1678                 rslt->ExtPageType = cfgp->ExtPageType;
 1679                 error = 0;
 1680                 break;
 1681         case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
 1682                 mpt_lprt(mpt, MPT_PRT_DEBUG,
 1683                     "Invalid Page Type %d Number %d Addr 0x%0x\n",
 1684                     MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
 1685                 error = EINVAL;
 1686                 break;
 1687         default:
 1688                 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
 1689                         req->IOCStatus);
 1690                 error = EIO;
 1691                 break;
 1692         }
 1693         mpt_free_request(mpt, req);
 1694         return (error);
 1695 }
 1696 
 1697 int
 1698 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1699                      CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
 1700                      int sleep_ok, int timeout_ms)
 1701 {
 1702         request_t    *req;
 1703         cfgparms_t    params;
 1704         int           error;
 1705 
 1706         req = mpt_get_request(mpt, sleep_ok);
 1707         if (req == NULL) {
 1708                 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
 1709                 return (-1);
 1710         }
 1711 
 1712         params.Action = Action;
 1713         params.PageVersion = hdr->PageVersion;
 1714         params.PageLength = 0;
 1715         params.PageNumber = hdr->PageNumber;
 1716         params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 1717         params.PageAddress = PageAddress;
 1718         params.ExtPageType = hdr->ExtPageType;
 1719         params.ExtPageLength = hdr->ExtPageLength;
 1720         error = mpt_issue_cfg_req(mpt, req, &params,
 1721                                   req->req_pbuf + MPT_RQSL(mpt),
 1722                                   len, sleep_ok, timeout_ms);
 1723         if (error != 0) {
 1724                 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
 1725                 return (-1);
 1726         }
 1727 
 1728         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1729                 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
 1730                         req->IOCStatus);
 1731                 mpt_free_request(mpt, req);
 1732                 return (-1);
 1733         }
 1734         memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
 1735         mpt_free_request(mpt, req);
 1736         return (0);
 1737 }
 1738 
 1739 int
 1740 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
 1741                     uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
 1742                     int sleep_ok, int timeout_ms)
 1743 {
 1744         request_t  *req;
 1745         cfgparms_t params;
 1746         MSG_CONFIG *cfgp;
 1747         int         error;
 1748 
 1749         req = mpt_get_request(mpt, sleep_ok);
 1750         if (req == NULL) {
 1751                 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
 1752                 return (ENOMEM);
 1753         }
 1754 
 1755         params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
 1756         params.PageVersion = 0;
 1757         params.PageLength = 0;
 1758         params.PageNumber = PageNumber;
 1759         params.PageType = PageType;
 1760         params.PageAddress = PageAddress;
 1761         error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
 1762                                   sleep_ok, timeout_ms);
 1763         if (error != 0) {
 1764                 /*
 1765                  * Leave the request. Without resetting the chip, it's
 1766                  * still owned by it and we'll just get into trouble
 1767                  * freeing it now. Mark it as abandoned so that if it
 1768                  * shows up later it can be freed.
 1769                  */
 1770                 mpt_prt(mpt, "read_cfg_header timed out\n");
 1771                 return (ETIMEDOUT);
 1772         }
 1773 
 1774         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
 1775         case MPI_IOCSTATUS_SUCCESS:
 1776                 cfgp = req->req_vbuf;
 1777                 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
 1778                 error = 0;
 1779                 break;
 1780         case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
 1781                 mpt_lprt(mpt, MPT_PRT_DEBUG,
 1782                     "Invalid Page Type %d Number %d Addr 0x%0x\n",
 1783                     PageType, PageNumber, PageAddress);
 1784                 error = EINVAL;
 1785                 break;
 1786         default:
 1787                 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
 1788                         req->IOCStatus);
 1789                 error = EIO;
 1790                 break;
 1791         }
 1792         mpt_free_request(mpt, req);
 1793         return (error);
 1794 }
 1795 
 1796 int
 1797 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1798                   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
 1799                   int timeout_ms)
 1800 {
 1801         request_t    *req;
 1802         cfgparms_t    params;
 1803         int           error;
 1804 
 1805         req = mpt_get_request(mpt, sleep_ok);
 1806         if (req == NULL) {
 1807                 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
 1808                 return (-1);
 1809         }
 1810 
 1811         params.Action = Action;
 1812         params.PageVersion = hdr->PageVersion;
 1813         params.PageLength = hdr->PageLength;
 1814         params.PageNumber = hdr->PageNumber;
 1815         params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
 1816         params.PageAddress = PageAddress;
 1817         error = mpt_issue_cfg_req(mpt, req, &params,
 1818                                   req->req_pbuf + MPT_RQSL(mpt),
 1819                                   len, sleep_ok, timeout_ms);
 1820         if (error != 0) {
 1821                 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
 1822                 return (-1);
 1823         }
 1824 
 1825         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1826                 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
 1827                         req->IOCStatus);
 1828                 mpt_free_request(mpt, req);
 1829                 return (-1);
 1830         }
 1831         memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
 1832         mpt_free_request(mpt, req);
 1833         return (0);
 1834 }
 1835 
 1836 int
 1837 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
 1838                    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
 1839                    int timeout_ms)
 1840 {
 1841         request_t    *req;
 1842         cfgparms_t    params;
 1843         u_int         hdr_attr;
 1844         int           error;
 1845 
 1846         hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
 1847         if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
 1848             hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
 1849                 mpt_prt(mpt, "page type 0x%x not changeable\n",
 1850                         hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
 1851                 return (-1);
 1852         }
 1853 
 1854 #if     0
 1855         /*
 1856          * We shouldn't mask off other bits here.
 1857          */
 1858         hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
 1859 #endif
 1860 
 1861         req = mpt_get_request(mpt, sleep_ok);
 1862         if (req == NULL)
 1863                 return (-1);
 1864 
 1865         memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
 1866 
 1867         /*
 1868          * There isn't any point in restoring stripped out attributes
 1869          * if you then mask them going down to issue the request.
 1870          */
 1871 
 1872         params.Action = Action;
 1873         params.PageVersion = hdr->PageVersion;
 1874         params.PageLength = hdr->PageLength;
 1875         params.PageNumber = hdr->PageNumber;
 1876         params.PageAddress = PageAddress;
 1877 #if     0
 1878         /* Restore stripped out attributes */
 1879         hdr->PageType |= hdr_attr;
 1880         params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
 1881 #else
 1882         params.PageType = hdr->PageType;
 1883 #endif
 1884         error = mpt_issue_cfg_req(mpt, req, &params,
 1885                                   req->req_pbuf + MPT_RQSL(mpt),
 1886                                   len, sleep_ok, timeout_ms);
 1887         if (error != 0) {
 1888                 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
 1889                 return (-1);
 1890         }
 1891 
 1892         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
 1893                 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
 1894                         req->IOCStatus);
 1895                 mpt_free_request(mpt, req);
 1896                 return (-1);
 1897         }
 1898         mpt_free_request(mpt, req);
 1899         return (0);
 1900 }
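/*
 * A minimal read-modify-write sketch using the three helpers above
 * (illustrative only; the page selection, the buffer size, and the
 * standard MPI READ_CURRENT action constant are assumptions, not taken
 * from this file):
 */
#if 0
        CONFIG_PAGE_HEADER hdr;
        uint8_t page[256];      /* assumed large enough for the page */

        if (mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, /*PageNumber*/1,
            /*PageAddress*/0, &hdr, /*sleep_ok*/FALSE, /*timeout_ms*/5000))
                return;
        memcpy(page, &hdr, sizeof(hdr));
        if (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
            (CONFIG_PAGE_HEADER *)page, hdr.PageLength << 2, FALSE, 5000))
                return;
        /* ... modify the fields of interest in 'page' ... */
        mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0,
            (CONFIG_PAGE_HEADER *)page, hdr.PageLength << 2, FALSE, 5000);
#endif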
 1901 
 1902 /*
 1903  * Read IOC configuration information
 1904  */
 1905 static int
 1906 mpt_read_config_info_ioc(struct mpt_softc *mpt)
 1907 {
 1908         CONFIG_PAGE_HEADER hdr;
 1909         struct mpt_raid_volume *mpt_raid;
 1910         int rv;
 1911         int i;
 1912         size_t len;
 1913 
 1914         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
 1915                 2, 0, &hdr, FALSE, 5000);
 1916         /*
 1917          * If it's an invalid page, so what? Not a supported function....
 1918          */
 1919         if (rv == EINVAL) {
 1920                 return (0);
 1921         }
 1922         if (rv) {
 1923                 return (rv);
 1924         }
 1925 
 1926         mpt_lprt(mpt, MPT_PRT_DEBUG,
 1927             "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
 1928             hdr.PageVersion, hdr.PageLength << 2,
 1929             hdr.PageNumber, hdr.PageType);
 1930 
 1931         len = hdr.PageLength * sizeof(uint32_t);
 1932         mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 1933         if (mpt->ioc_page2 == NULL) {
 1934                 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
 1935                 mpt_raid_free_mem(mpt);
 1936                 return (ENOMEM);
 1937         }
 1938         memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
 1939         rv = mpt_read_cur_cfg_page(mpt, 0,
 1940             &mpt->ioc_page2->Header, len, FALSE, 5000);
 1941         if (rv) {
 1942                 mpt_prt(mpt, "failed to read IOC Page 2\n");
 1943                 mpt_raid_free_mem(mpt);
 1944                 return (EIO);
 1945         }
 1946         mpt2host_config_page_ioc2(mpt->ioc_page2);
 1947 
 1948         if (mpt->ioc_page2->CapabilitiesFlags != 0) {
 1949                 uint32_t mask;
 1950 
 1951                 mpt_prt(mpt, "Capabilities: (");
 1952                 for (mask = 1; mask != 0; mask <<= 1) {
 1953                         if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
 1954                                 continue;
 1955                         }
 1956                         switch (mask) {
 1957                         case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
 1958                                 mpt_prtc(mpt, " RAID-0");
 1959                                 break;
 1960                         case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
 1961                                 mpt_prtc(mpt, " RAID-1E");
 1962                                 break;
 1963                         case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
 1964                                 mpt_prtc(mpt, " RAID-1");
 1965                                 break;
 1966                         case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
 1967                                 mpt_prtc(mpt, " SES");
 1968                                 break;
 1969                         case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
 1970                                 mpt_prtc(mpt, " SAFTE");
 1971                                 break;
                        case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
                                mpt_prtc(mpt, " Multi-Channel-Arrays");
                                break;
                        default:
 1975                                 break;
 1976                         }
 1977                 }
 1978                 mpt_prtc(mpt, " )\n");
 1979                 if ((mpt->ioc_page2->CapabilitiesFlags
 1980                    & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
 1981                     | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
 1982                     | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
 1983                         mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
 1984                                 mpt->ioc_page2->NumActiveVolumes,
 1985                                 mpt->ioc_page2->NumActiveVolumes != 1
 1986                               ? "s " : " ",
 1987                                 mpt->ioc_page2->MaxVolumes);
 1988                         mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
 1989                                 mpt->ioc_page2->NumActivePhysDisks,
 1990                                 mpt->ioc_page2->NumActivePhysDisks != 1
 1991                               ? "s " : " ",
 1992                                 mpt->ioc_page2->MaxPhysDisks);
 1993                 }
 1994         }
 1995 
 1996         len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
 1997         mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 1998         if (mpt->raid_volumes == NULL) {
 1999                 mpt_prt(mpt, "Could not allocate RAID volume data\n");
 2000                 mpt_raid_free_mem(mpt);
 2001                 return (ENOMEM);
 2002         }
 2003 
 2004         /*
 2005          * Copy critical data out of ioc_page2 so that we can
 2006          * safely refresh the page without windows of unreliable
 2007          * data.
 2008          */
        mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
 2010 
 2011         len = sizeof(*mpt->raid_volumes->config_page) +
 2012             (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
 2013         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
 2014                 mpt_raid = &mpt->raid_volumes[i];
 2015                 mpt_raid->config_page =
 2016                     malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2017                 if (mpt_raid->config_page == NULL) {
 2018                         mpt_prt(mpt, "Could not allocate RAID page data\n");
 2019                         mpt_raid_free_mem(mpt);
 2020                         return (ENOMEM);
 2021                 }
 2022         }
 2023         mpt->raid_page0_len = len;
 2024 
 2025         len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
 2026         mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2027         if (mpt->raid_disks == NULL) {
 2028                 mpt_prt(mpt, "Could not allocate RAID disk data\n");
 2029                 mpt_raid_free_mem(mpt);
 2030                 return (ENOMEM);
 2031         }
        mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
 2033 
 2034         /*
 2035          * Load page 3.
 2036          */
 2037         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
 2038             3, 0, &hdr, FALSE, 5000);
 2039         if (rv) {
 2040                 mpt_raid_free_mem(mpt);
 2041                 return (EIO);
 2042         }
 2043 
 2044         mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
 2045             hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
 2046 
 2047         len = hdr.PageLength * sizeof(uint32_t);
 2048         mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2049         if (mpt->ioc_page3 == NULL) {
 2050                 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
 2051                 mpt_raid_free_mem(mpt);
 2052                 return (ENOMEM);
 2053         }
 2054         memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
 2055         rv = mpt_read_cur_cfg_page(mpt, 0,
 2056             &mpt->ioc_page3->Header, len, FALSE, 5000);
 2057         if (rv) {
 2058                 mpt_raid_free_mem(mpt);
 2059                 return (EIO);
 2060         }
 2061         mpt2host_config_page_ioc3(mpt->ioc_page3);
 2062         mpt_raid_wakeup(mpt);
 2063         return (0);
 2064 }
 2065 
 2066 /*
 2067  * Enable IOC port
 2068  */
 2069 static int
 2070 mpt_send_port_enable(struct mpt_softc *mpt, int port)
 2071 {
 2072         request_t       *req;
 2073         MSG_PORT_ENABLE *enable_req;
 2074         int              error;
 2075 
 2076         req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
 2077         if (req == NULL)
 2078                 return (-1);
 2079 
 2080         enable_req = req->req_vbuf;
 2081         memset(enable_req, 0,  MPT_RQSL(mpt));
 2082 
 2083         enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
 2084         enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 2085         enable_req->PortNumber = port;
 2086 
 2087         mpt_check_doorbell(mpt);
 2088         mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
 2089 
 2090         mpt_send_cmd(mpt, req);
 2091         error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
            FALSE, (mpt->is_sas || mpt->is_fc) ? 300000 : 30000);
 2093         if (error != 0) {
 2094                 mpt_prt(mpt, "port %d enable timed out\n", port);
 2095                 return (-1);
 2096         }
 2097         mpt_free_request(mpt, req);
 2098         mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
 2099         return (0);
 2100 }
 2101 
 2102 /*
 2103  * Enable/Disable asynchronous event reporting.
 2104  */
 2105 static int
 2106 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
 2107 {
 2108         request_t *req;
 2109         MSG_EVENT_NOTIFY *enable_req;
 2110 
 2111         req = mpt_get_request(mpt, FALSE);
 2112         if (req == NULL) {
 2113                 return (ENOMEM);
 2114         }
 2115         enable_req = req->req_vbuf;
 2116         memset(enable_req, 0, sizeof *enable_req);
 2117 
 2118         enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
 2119         enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
 2120         enable_req->Switch     = onoff;
 2121 
 2122         mpt_check_doorbell(mpt);
 2123         mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
 2124             onoff ? "en" : "dis");
 2125         /*
 2126          * Send the command off, but don't wait for it.
 2127          */
 2128         mpt_send_cmd(mpt, req);
 2129         return (0);
 2130 }
 2131 
 2132 /*
 2133  * Un-mask the interrupts on the chip.
 2134  */
 2135 void
 2136 mpt_enable_ints(struct mpt_softc *mpt)
 2137 {
 2138 
        /* Unmask everything except the doorbell interrupt */
 2140         mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
 2141 }
 2142 
 2143 /*
 2144  * Mask the interrupts on the chip.
 2145  */
 2146 void
 2147 mpt_disable_ints(struct mpt_softc *mpt)
 2148 {
 2149 
 2150         /* Mask all interrupts */
 2151         mpt_write(mpt, MPT_OFFSET_INTR_MASK,
 2152             MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
 2153 }
 2154 
 2155 static void
 2156 mpt_sysctl_attach(struct mpt_softc *mpt)
 2157 {
 2158 #if __FreeBSD_version >= 500000
 2159         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
 2160         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
 2161 
 2162         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2163                        "debug", CTLFLAG_RW, &mpt->verbose, 0,
 2164                        "Debugging/Verbose level");
 2165         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2166                        "role", CTLFLAG_RD, &mpt->role, 0,
 2167                        "HBA role");
 2168 #ifdef  MPT_TEST_MULTIPATH
 2169         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2170                        "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
 2171                        "Next Target to Fail");
 2172 #endif
 2173 #endif
 2174 }
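/*
 * Editorial usage note: the tree attached above hangs off the device's
 * standard sysctl node, so the verbosity of an instance can be changed
 * at runtime from userland, e.g. (unit number assumed):
 *
 *      sysctl dev.mpt.0.debug=3
 */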
 2175 
 2176 int
 2177 mpt_attach(struct mpt_softc *mpt)
 2178 {
 2179         struct mpt_personality *pers;
 2180         int i;
 2181         int error;
 2182 
 2183         mpt_core_attach(mpt);
 2184         mpt_core_enable(mpt);
 2185 
 2186         TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
 2187         for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
 2188                 pers = mpt_personalities[i];
 2189                 if (pers == NULL) {
 2190                         continue;
 2191                 }
 2192                 if (pers->probe(mpt) == 0) {
 2193                         error = pers->attach(mpt);
 2194                         if (error != 0) {
 2195                                 mpt_detach(mpt);
 2196                                 return (error);
 2197                         }
 2198                         mpt->mpt_pers_mask |= (0x1 << pers->id);
 2199                         pers->use_count++;
 2200                 }
 2201         }
 2202 
 2203         /*
 2204          * Now that we've attached everything, do the enable function
 2205          * for all of the personalities. This allows the personalities
 2206          * to do setups that are appropriate for them prior to enabling
 2207          * any ports.
 2208          */
 2209         for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
 2210                 pers = mpt_personalities[i];
 2211                 if (pers != NULL  && MPT_PERS_ATTACHED(pers, mpt) != 0) {
 2212                         error = pers->enable(mpt);
 2213                         if (error != 0) {
                                mpt_prt(mpt, "personality %s attached but"
                                    " failed to enable (%d)\n", pers->name,
                                    error);
 2216                                 mpt_detach(mpt);
 2217                                 return (error);
 2218                         }
 2219                 }
 2220         }
 2221         return (0);
 2222 }
 2223 
 2224 int
 2225 mpt_shutdown(struct mpt_softc *mpt)
 2226 {
 2227         struct mpt_personality *pers;
 2228 
 2229         MPT_PERS_FOREACH_REVERSE(mpt, pers) {
 2230                 pers->shutdown(mpt);
 2231         }
 2232         return (0);
 2233 }
 2234 
 2235 int
 2236 mpt_detach(struct mpt_softc *mpt)
 2237 {
 2238         struct mpt_personality *pers;
 2239 
 2240         MPT_PERS_FOREACH_REVERSE(mpt, pers) {
 2241                 pers->detach(mpt);
 2242                 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
 2243                 pers->use_count--;
 2244         }
 2245         TAILQ_REMOVE(&mpt_tailq, mpt, links);
 2246         return (0);
 2247 }
 2248 
 2249 static int
 2250 mpt_core_load(struct mpt_personality *pers)
 2251 {
 2252         int i;
 2253 
 2254         /*
 2255          * Setup core handlers and insert the default handler
 2256          * into all "empty slots".
 2257          */
 2258         for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
 2259                 mpt_reply_handlers[i] = mpt_default_reply_handler;
 2260         }
 2261 
 2262         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
 2263             mpt_event_reply_handler;
 2264         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
 2265             mpt_config_reply_handler;
 2266         mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
 2267             mpt_handshake_reply_handler;
 2268         return (0);
 2269 }
 2270 
 2271 /*
 2272  * Initialize per-instance driver data and perform
 2273  * initial controller configuration.
 2274  */
 2275 static int
 2276 mpt_core_attach(struct mpt_softc *mpt)
 2277 {
 2278         int val, error;
 2279 
 2280         LIST_INIT(&mpt->ack_frames);
 2281         /* Put all request buffers on the free list */
 2282         TAILQ_INIT(&mpt->request_pending_list);
 2283         TAILQ_INIT(&mpt->request_free_list);
 2284         TAILQ_INIT(&mpt->request_timeout_list);
 2285         for (val = 0; val < MPT_MAX_LUNS; val++) {
 2286                 STAILQ_INIT(&mpt->trt[val].atios);
 2287                 STAILQ_INIT(&mpt->trt[val].inots);
 2288         }
 2289         STAILQ_INIT(&mpt->trt_wildcard.atios);
 2290         STAILQ_INIT(&mpt->trt_wildcard.inots);
 2291 #ifdef  MPT_TEST_MULTIPATH
 2292         mpt->failure_id = -1;
 2293 #endif
 2294         mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
 2295         mpt_sysctl_attach(mpt);
 2296         mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
 2297             mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
 2298 
 2299         MPT_LOCK(mpt);
 2300         error = mpt_configure_ioc(mpt, 0, 0);
 2301         MPT_UNLOCK(mpt);
 2302 
 2303         return (error);
 2304 }
 2305 
 2306 static int
 2307 mpt_core_enable(struct mpt_softc *mpt)
 2308 {
 2309 
 2310         /*
 2311          * We enter with the IOC enabled, but async events
 2312          * not enabled, ports not enabled and interrupts
 2313          * not enabled.
 2314          */
 2315         MPT_LOCK(mpt);
 2316 
        /*
         * Enable asynchronous event reporting. All personalities
         * have attached by now, so they should be able to field
         * async events.
         */
 2322         mpt_send_event_request(mpt, 1);
 2323 
 2324         /*
 2325          * Catch any pending interrupts
 2326          *
         * This seems to be crucial; otherwise
         * the port enable below times out.
 2329          */
 2330         mpt_intr(mpt);
 2331 
 2332         /*
 2333          * Enable Interrupts
 2334          */
 2335         mpt_enable_ints(mpt);
 2336 
 2337         /*
 2338          * Catch any pending interrupts
 2339          *
         * This seems to be crucial; otherwise
         * the port enable below times out.
 2342          */
 2343         mpt_intr(mpt);
 2344 
 2345         /*
 2346          * Enable the port.
 2347          */
 2348         if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
 2349                 mpt_prt(mpt, "failed to enable port 0\n");
 2350                 MPT_UNLOCK(mpt);
 2351                 return (ENXIO);
 2352         }
 2353         MPT_UNLOCK(mpt);
 2354         return (0);
 2355 }
 2356 
 2357 static void
 2358 mpt_core_shutdown(struct mpt_softc *mpt)
 2359 {
 2360 
 2361         mpt_disable_ints(mpt);
 2362 }
 2363 
 2364 static void
 2365 mpt_core_detach(struct mpt_softc *mpt)
 2366 {
 2367         int val;
 2368 
 2369         /*
 2370          * XXX: FREE MEMORY 
 2371          */
 2372         mpt_disable_ints(mpt);
 2373 
 2374         /* Make sure no request has pending timeouts. */
 2375         for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 2376                 request_t *req = &mpt->request_pool[val];
 2377                 mpt_callout_drain(mpt, &req->callout);
 2378         }
 2379 
 2380         mpt_dma_buf_free(mpt);
 2381 }
 2382 
 2383 static int
 2384 mpt_core_unload(struct mpt_personality *pers)
 2385 {
 2386 
 2387         /* Unload is always successful. */
 2388         return (0);
 2389 }
 2390 
 2391 #define FW_UPLOAD_REQ_SIZE                              \
 2392         (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)  \
 2393        + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
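/*
 * Editorial note on the arithmetic above: MSG_FW_UPLOAD ends in an
 * SGE_MPI_UNION placeholder, so the macro subtracts that placeholder
 * and adds back the two elements actually sent in mpt_upload_fw():
 * a transaction-context SGE describing the image, followed by one
 * simple 32-bit SGE pointing at the host buffer.
 */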
 2394 
 2395 static int
 2396 mpt_upload_fw(struct mpt_softc *mpt)
 2397 {
 2398         uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
 2399         MSG_FW_UPLOAD_REPLY fw_reply;
 2400         MSG_FW_UPLOAD *fw_req;
 2401         FW_UPLOAD_TCSGE *tsge;
 2402         SGE_SIMPLE32 *sge;
 2403         uint32_t flags;
 2404         int error;
 2405         
 2406         memset(&fw_req_buf, 0, sizeof(fw_req_buf));
 2407         fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
 2408         fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
 2409         fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
 2410         fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 2411         tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
 2412         tsge->DetailsLength = 12;
 2413         tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
 2414         tsge->ImageSize = htole32(mpt->fw_image_size);
 2415         sge = (SGE_SIMPLE32 *)(tsge + 1);
 2416         flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
 2417               | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
 2418               | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
 2419         flags <<= MPI_SGE_FLAGS_SHIFT;
 2420         sge->FlagsLength = htole32(flags | mpt->fw_image_size);
 2421         sge->Address = htole32(mpt->fw_phys);
 2422         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
 2423         error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
 2424         if (error)
 2425                 return(error);
 2426         error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
 2427         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
 2428         return (error);
 2429 }
 2430 
 2431 static void
 2432 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
 2433                uint32_t *data, bus_size_t len)
 2434 {
 2435         uint32_t *data_end;
 2436 
 2437         data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
 2438         if (mpt->is_sas) {
 2439                 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
 2440         }
 2441         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
 2442         while (data != data_end) {
 2443                 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
 2444                 data++;
 2445         }
 2446         if (mpt->is_sas) {
 2447                 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
 2448         }
 2449 }
 2450 
 2451 static int
 2452 mpt_download_fw(struct mpt_softc *mpt)
 2453 {
 2454         MpiFwHeader_t *fw_hdr;
 2455         int error;
 2456         uint32_t ext_offset;
 2457         uint32_t data;
 2458 
 2459         if (mpt->pci_pio_reg == NULL) {
 2460                 mpt_prt(mpt, "No PIO resource!\n");
 2461                 return (ENXIO);
 2462         }
 2463 
 2464         mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
 2465                 mpt->fw_image_size);
 2466 
 2467         error = mpt_enable_diag_mode(mpt);
 2468         if (error != 0) {
 2469                 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
 2470                 return (EIO);
 2471         }
 2472 
 2473         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
 2474                   MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
 2475 
 2476         fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
 2477         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
 2478         mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
 2479                        fw_hdr->ImageSize);
 2480         bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);
 2481 
 2482         ext_offset = fw_hdr->NextImageHeaderOffset;
 2483         while (ext_offset != 0) {
 2484                 MpiExtImageHeader_t *ext;
 2485 
 2486                 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
 2487                 ext_offset = ext->NextImageHeaderOffset;
 2488                 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
 2489                     BUS_DMASYNC_PREWRITE);
 2490                 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
 2491                                ext->ImageSize);
 2492                 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
 2493                     BUS_DMASYNC_POSTWRITE);
 2494         }
 2495 
 2496         if (mpt->is_sas) {
 2497                 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
 2498         }
 2499         /* Setup the address to jump to on reset. */
 2500         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
 2501         mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
 2502 
 2503         /*
 2504          * The controller sets the "flash bad" status after attempting
 2505          * to auto-boot from flash.  Clear the status so that the controller
 2506          * will continue the boot process with our newly installed firmware.
 2507          */
 2508         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
 2509         data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
 2510         mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
 2511         mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
 2512 
 2513         if (mpt->is_sas) {
 2514                 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
 2515         }
 2516 
 2517         /*
 2518          * Re-enable the processor and clear the boot halt flag.
 2519          */
 2520         data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
 2521         data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
 2522         mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
 2523 
 2524         mpt_disable_diag_mode(mpt);
 2525         return (0);
 2526 }
 2527 
 2528 static int
 2529 mpt_dma_buf_alloc(struct mpt_softc *mpt)
 2530 {
 2531         struct mpt_map_info mi;
 2532         uint8_t *vptr;
 2533         uint32_t pptr, end;
 2534         int i, error;
 2535 
 2536         /* Create a child tag for data buffers */
 2537         if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
 2538             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 2539             NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
 2540             mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
 2541             &mpt->buffer_dmat) != 0) {
 2542                 mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
 2543                 return (1);
 2544         }
 2545 
 2546         /* Create a child tag for request buffers */
 2547         if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
 2548             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 2549             NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
 2550             &mpt->request_dmat) != 0) {
 2551                 mpt_prt(mpt, "cannot create a dma tag for requests\n");
 2552                 return (1);
 2553         }
 2554 
 2555         /* Allocate some DMA accessible memory for requests */
 2556         if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
 2557             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
 2558                 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
 2559                     MPT_REQ_MEM_SIZE(mpt));
 2560                 return (1);
 2561         }
 2562 
 2563         mi.mpt = mpt;
 2564         mi.error = 0;
 2565 
 2566         /* Load and lock it into "bus space" */
 2567         bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
 2568             MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
 2569 
 2570         if (mi.error) {
 2571                 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
 2572                     mi.error);
 2573                 return (1);
 2574         }
 2575         mpt->request_phys = mi.phys;
 2576 
 2577         /*
 2578          * Now create per-request dma maps
 2579          */
 2580         i = 0;
 2581         pptr =  mpt->request_phys;
 2582         vptr =  mpt->request;
 2583         end = pptr + MPT_REQ_MEM_SIZE(mpt);
        while (pptr < end) {
 2585                 request_t *req = &mpt->request_pool[i];
 2586                 req->index = i++;
 2587 
 2588                 /* Store location of Request Data */
 2589                 req->req_pbuf = pptr;
 2590                 req->req_vbuf = vptr;
 2591 
 2592                 pptr += MPT_REQUEST_AREA;
 2593                 vptr += MPT_REQUEST_AREA;
 2594 
 2595                 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
 2596                 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
 2597 
 2598                 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
 2599                 if (error) {
 2600                         mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
 2601                             error);
 2602                         return (1);
 2603                 }
 2604         }
 2605 
 2606         return (0);
 2607 }
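/*
 * Editorial picture of the per-request layout established above: each
 * request owns MPT_REQUEST_AREA bytes of the shared allocation, and its
 * sense buffer is carved out of the tail of that same area:
 *
 *      req_pbuf/req_vbuf                       sense_pbuf/sense_vbuf
 *      |<------------- MPT_REQUEST_AREA ---------------->|
 *      [ request frame + chain space ...  | MPT_SENSE_SIZE ]
 */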
 2608 
 2609 static void
 2610 mpt_dma_buf_free(struct mpt_softc *mpt)
 2611 {
 2612         int i;
 2613 
 2614         if (mpt->request_dmat == 0) {
 2615                 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
 2616                 return;
 2617         }
 2618         for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
 2619                 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
 2620         }
 2621         bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
 2622         bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
 2623         bus_dma_tag_destroy(mpt->request_dmat);
 2624         mpt->request_dmat = 0;
 2625         bus_dma_tag_destroy(mpt->buffer_dmat);
 2626 }
 2627 
 2628 /*
 2629  * Allocate/Initialize data structures for the controller.  Called
 2630  * once at instance startup.
 2631  */
 2632 static int
 2633 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
 2634 {
 2635         PTR_MSG_PORT_FACTS_REPLY pfp;
 2636         int error, port, val;
 2637         size_t len;
 2638 
 2639         if (tn == MPT_MAX_TRYS) {
 2640                 return (-1);
 2641         }
 2642 
 2643         /*
 2644          * No need to reset if the IOC is already in the READY state.
 2645          *
 2646          * Force reset if initialization failed previously.
 2647          * Note that a hard_reset of the second channel of a '929
 2648          * will stop operation of the first channel.  Hopefully, if the
 2649          * first channel is ok, the second will not require a hard
 2650          * reset.
 2651          */
 2652         if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
 2653                 if (mpt_reset(mpt, FALSE) != MPT_OK) {
                        return (mpt_configure_ioc(mpt, tn + 1, 1));
 2655                 }
 2656                 needreset = 0;
 2657         }
 2658 
 2659         if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
 2660                 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
                return (mpt_configure_ioc(mpt, tn + 1, 1));
 2662         }
 2663         mpt2host_iocfacts_reply(&mpt->ioc_facts);
 2664 
 2665         mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
 2666             mpt->ioc_facts.MsgVersion >> 8,
 2667             mpt->ioc_facts.MsgVersion & 0xFF,
 2668             mpt->ioc_facts.HeaderVersion >> 8,
 2669             mpt->ioc_facts.HeaderVersion & 0xFF);
 2670 
 2671         /*
 2672          * Now that we know request frame size, we can calculate
 2673          * the actual (reasonable) segment limit for read/write I/O.
 2674          *
 2675          * This limit is constrained by:
 2676          *
 2677          *  + The size of each area we allocate per command (and how
 2678          *    many chain segments we can fit into it).
 2679          *  + The total number of areas we've set up.
 2680          *  + The actual chain depth the card will allow.
 2681          *
         * The first area's segment count is limited by the I/O request
         * at the head of it. Realistically, we cannot allocate more
         * than MPT_MAX_REQUESTS areas, so to account for both
         * conditions we just start out with MPT_MAX_REQUESTS-2.
 2686          *
 2687          */
 2688         /* total number of request areas we (can) allocate */
 2689         mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
 2690 
 2691         /* converted to the number of chain areas possible */
 2692         mpt->max_seg_cnt *= MPT_NRFM(mpt);
 2693 
 2694         /* limited by the number of chain areas the card will support */
 2695         if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
 2696                 mpt_lprt(mpt, MPT_PRT_INFO,
 2697                     "chain depth limited to %u (from %u)\n",
 2698                     mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
 2699                 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
 2700         }
 2701 
 2702         /* converted to the number of simple sges in chain segments. */
 2703         mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
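        /*
         * Editorial summary of the computation above:
         *
         *      max_seg_cnt = min((MPT_MAX_REQUESTS - 2) * MPT_NRFM,
         *                        MaxChainDepth) * (MPT_NSGL - 1)
         */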
 2704 
 2705         /*
 2706          * Use this as the basis for reporting the maximum I/O size to CAM.
 2707          */
 2708         mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
 2709 
 2710         error = mpt_dma_buf_alloc(mpt);
 2711         if (error != 0) {
 2712                 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
 2713                 return (EIO);
 2714         }
 2715 
 2716         for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
 2717                 request_t *req = &mpt->request_pool[val];
 2718                 req->state = REQ_STATE_ALLOCATED;
 2719                 mpt_callout_init(mpt, &req->callout);
 2720                 mpt_free_request(mpt, req);
 2721         }
 2722 
 2723         mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
 2724                  "CAM Segment Count: %u\n", mpt->max_seg_cnt,
 2725                  mpt->max_cam_seg_cnt);
 2726 
 2727         mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
 2728             mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
 2729         mpt_lprt(mpt, MPT_PRT_INFO,
 2730             "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
 2731             "Request Frame Size %u bytes Max Chain Depth %u\n",
 2732             mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
 2733             mpt->ioc_facts.RequestFrameSize << 2,
 2734             mpt->ioc_facts.MaxChainDepth);
 2735         mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
 2736             "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
 2737             mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
 2738 
 2739         len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
 2740         mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
 2741         if (mpt->port_facts == NULL) {
 2742                 mpt_prt(mpt, "unable to allocate memory for port facts\n");
 2743                 return (ENOMEM);
 2744         }
 2745 
 2746 
 2747         if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
 2748             (mpt->fw_uploaded == 0)) {
 2749                 struct mpt_map_info mi;
 2750 
 2751                 /*
 2752                  * In some configurations, the IOC's firmware is
 2753                  * stored in a shared piece of system NVRAM that
 2754                  * is only accessible via the BIOS.  In this
 2755                  * case, the IOC keeps a copy of the firmware in
 2756                  * RAM until the OS driver retrieves it.  Once
 2757                  * retrieved, we are responsible for re-downloading
 2758                  * the firmware after any hard reset.
 2759                  */
 2760                 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
 2761                 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
 2762                     BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2763                     mpt->fw_image_size, 1, mpt->fw_image_size, 0,
 2764                     &mpt->fw_dmat);
 2765                 if (error != 0) {
 2766                         mpt_prt(mpt, "cannot create firmware dma tag\n");
 2767                         return (ENOMEM);
 2768                 }
 2769                 error = bus_dmamem_alloc(mpt->fw_dmat,
 2770                     (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
 2771                     BUS_DMA_COHERENT, &mpt->fw_dmap);
 2772                 if (error != 0) {
 2773                         mpt_prt(mpt, "cannot allocate firmware memory\n");
 2774                         bus_dma_tag_destroy(mpt->fw_dmat);
 2775                         return (ENOMEM);
 2776                 }
 2777                 mi.mpt = mpt;
 2778                 mi.error = 0;
 2779                 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
 2780                     mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
 2781                 mpt->fw_phys = mi.phys;
 2782 
 2783                 error = mpt_upload_fw(mpt);
 2784                 if (error != 0) {
 2785                         mpt_prt(mpt, "firmware upload failed.\n");
 2786                         bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
 2787                         bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
 2788                             mpt->fw_dmap);
 2789                         bus_dma_tag_destroy(mpt->fw_dmat);
 2790                         mpt->fw_image = NULL;
 2791                         return (EIO);
 2792                 }
 2793                 mpt->fw_uploaded = 1;
 2794         }
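       /*
        * If the image was uploaded above, it stays resident so that it
        * can be pushed back down to the IOC after a hard reset; the
        * reset path is assumed to check fw_image and re-download it
        * via mpt_download_fw() elsewhere in this file.
        */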
 2795 
 2796         for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
 2797                 pfp = &mpt->port_facts[port];
 2798                 error = mpt_get_portfacts(mpt, 0, pfp);
 2799                 if (error != MPT_OK) {
 2800                         mpt_prt(mpt,
 2801                             "mpt_get_portfacts on port %d failed\n", port);
 2802                         free(mpt->port_facts, M_DEVBUF);
 2803                         mpt->port_facts = NULL;
 2804                         return (mpt_configure_ioc(mpt, tn++, 1));
 2805                 }
 2806                 mpt2host_portfacts_reply(pfp);
 2807 
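               /*
                * "error" is reused below as the mpt_lprt() verbosity
                * level: port 0's facts are routine and logged at debug
                * level, while any additional ports are unusual enough
                * to log at info level.
                */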
 2808                 if (port > 0) {
 2809                         error = MPT_PRT_INFO;
 2810                 } else {
 2811                         error = MPT_PRT_DEBUG;
 2812                 }
 2813                 mpt_lprt(mpt, error,
 2814                     "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
 2815                     port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
 2816                     pfp->MaxDevices);
 2817 
 2818         }
 2819 
 2820         /*
 2821          * XXX: Not yet supporting more than port 0
 2822          */
 2823         pfp = &mpt->port_facts[0];
 2824         if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
 2825                 mpt->is_fc = 1;
 2826                 mpt->is_sas = 0;
 2827                 mpt->is_spi = 0;
 2828         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
 2829                 mpt->is_fc = 0;
 2830                 mpt->is_sas = 1;
 2831                 mpt->is_spi = 0;
 2832         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
 2833                 mpt->is_fc = 0;
 2834                 mpt->is_sas = 0;
 2835                 mpt->is_spi = 1;
 2836                 if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
 2837                         mpt->mpt_ini_id = pfp->PortSCSIID;
 2838         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
 2839                 mpt_prt(mpt, "iSCSI not supported yet\n");
 2840                 return (ENXIO);
 2841         } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
 2842                 mpt_prt(mpt, "Inactive Port\n");
 2843                 return (ENXIO);
 2844         } else {
 2845                 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
 2846                 return (ENXIO);
 2847         }
 2848 
 2849         /*
 2850          * Set our role based on what this port supports.
 2851          *
 2852          * Note that individual personality modules may change
 2853          * this later if it is not what they want.
 2854          */
 2855         mpt->role = MPT_ROLE_NONE;
 2856         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
 2857                 mpt->role |= MPT_ROLE_INITIATOR;
 2858         }
 2859         if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
 2860                 mpt->role |= MPT_ROLE_TARGET;
 2861         }
 2862 
 2863         /*
 2864          * Enable the IOC
 2865          */
 2866         if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
 2867                 mpt_prt(mpt, "unable to initialize IOC\n");
 2868                 return (ENXIO);
 2869         }
 2870 
 2871         /*
 2872          * Read IOC configuration information.
 2873          *
 2874          * We need this to determine whether or not we have
 2875          * certain settings, e.g. for Integrated Mirroring.
 2876          */
 2877         mpt_read_config_info_ioc(mpt);
 2878 
 2879         return (0);
 2880 }
 2881 
 2882 static int
 2883 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
 2884 {
 2885         uint32_t pptr;
 2886         int val;
 2887 
 2888         if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
 2889                 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
 2890                 return (EIO);
 2891         }
 2892 
 2893         mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
 2894 
 2895         if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
 2896                 mpt_prt(mpt, "IOC failed to go to run state\n");
 2897                 return (ENXIO);
 2898         }
 2899         mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
 2900 
 2901         /*
 2902          * Give it reply buffers
 2903          *
 2904          * Do *not* exceed global credits.
 2905          */
 2906         for (val = 0, pptr = mpt->reply_phys;
 2907             (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
 2908              pptr += MPT_REPLY_SIZE) {
 2909                 mpt_free_reply(mpt, pptr);
 2910                 if (++val == mpt->ioc_facts.GlobalCredits - 1)
 2911                         break;
 2912         }
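       /*
        * Illustrative arithmetic with hypothetical values: with a
        * 4096-byte page and a 128-byte MPT_REPLY_SIZE, the strict "<"
        * above admits 4096 / 128 - 1 = 31 reply buffers, and the loop
        * stops earlier if the count reaches GlobalCredits - 1 first.
        */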
 2913 
 2914 
 2915         /*
 2916          * Enable the port if asked. This is only done if we're resetting
 2917          * the IOC after initial startup.
 2918          */
 2919         if (portenable) {
 2920                 /*
 2921                  * Enable asynchronous event reporting
 2922                  */
 2923                 mpt_send_event_request(mpt, 1);
 2924 
 2925                 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
 2926                         mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
 2927                         return (ENXIO);
 2928                 }
 2929         }
 2930         return (MPT_OK);
 2931 }
 2932 
 2933 /*
 2934  * Endian conversion functions - only used on big-endian machines.
 2935  */
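
/*
 * A sketch of what the conversion macros are assumed to expand to; the
 * real definitions live in mpt.h, and "tag" here is just a placeholder
 * for a structure field name:
 *
 *      #define MPT_2_HOST16(ptr, tag)  (ptr)->tag = le16toh((ptr)->tag)
 *      #define MPT_2_HOST32(ptr, tag)  (ptr)->tag = le32toh((ptr)->tag)
 *      #define HOST_2_MPT16(ptr, tag)  (ptr)->tag = htole16((ptr)->tag)
 *      #define HOST_2_MPT32(ptr, tag)  (ptr)->tag = htole32((ptr)->tag)
 *
 * MPI messages are little-endian on the wire, so these swap bytes on
 * big-endian hosts and would be no-ops on little-endian ones.
 */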
 2936 #if     _BYTE_ORDER == _BIG_ENDIAN
 2937 void
 2938 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
 2939 {
 2940 
 2941         MPT_2_HOST32(sge, FlagsLength);
 2942         MPT_2_HOST32(sge, u.Address64.Low);
 2943         MPT_2_HOST32(sge, u.Address64.High);
 2944 }
 2945 
 2946 void
 2947 mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
 2948 {
 2949 
 2950         MPT_2_HOST16(rp, MsgVersion);
 2951         MPT_2_HOST16(rp, HeaderVersion);
 2952         MPT_2_HOST32(rp, MsgContext);
 2953         MPT_2_HOST16(rp, IOCExceptions);
 2954         MPT_2_HOST16(rp, IOCStatus);
 2955         MPT_2_HOST32(rp, IOCLogInfo);
 2956         MPT_2_HOST16(rp, ReplyQueueDepth);
 2957         MPT_2_HOST16(rp, RequestFrameSize);
 2958         MPT_2_HOST16(rp, Reserved_0101_FWVersion);
 2959         MPT_2_HOST16(rp, ProductID);
 2960         MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
 2961         MPT_2_HOST16(rp, GlobalCredits);
 2962         MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
 2963         MPT_2_HOST16(rp, CurReplyFrameSize);
 2964         MPT_2_HOST32(rp, FWImageSize);
 2965         MPT_2_HOST32(rp, IOCCapabilities);
 2966         MPT_2_HOST32(rp, FWVersion.Word);
 2967         MPT_2_HOST16(rp, HighPriorityQueueDepth);
 2968         MPT_2_HOST16(rp, Reserved2);
 2969         mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
 2970         MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
 2971 }
 2972 
 2973 void
 2974 mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
 2975 {
 2976 
 2977         MPT_2_HOST16(pfp, Reserved);
 2978         MPT_2_HOST16(pfp, Reserved1);
 2979         MPT_2_HOST32(pfp, MsgContext);
 2980         MPT_2_HOST16(pfp, Reserved2);
 2981         MPT_2_HOST16(pfp, IOCStatus);
 2982         MPT_2_HOST32(pfp, IOCLogInfo);
 2983         MPT_2_HOST16(pfp, MaxDevices);
 2984         MPT_2_HOST16(pfp, PortSCSIID);
 2985         MPT_2_HOST16(pfp, ProtocolFlags);
 2986         MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
 2987         MPT_2_HOST16(pfp, MaxPersistentIDs);
 2988         MPT_2_HOST16(pfp, MaxLanBuckets);
 2989         MPT_2_HOST16(pfp, Reserved4);
 2990         MPT_2_HOST32(pfp, Reserved5);
 2991 }
 2992 
 2993 void
 2994 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
 2995 {
 2996         int i;
 2997 
 2998         MPT_2_HOST32(ioc2, CapabilitiesFlags);
 2999         for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
 3000                 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
 3001         }
 3002 }
 3003 
 3004 void
 3005 mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
 3006 {
 3007 
 3008         MPT_2_HOST16(ioc3, Reserved2);
 3009 }
 3010 
 3011 void
 3012 mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
 3013 {
 3014 
 3015         MPT_2_HOST32(sp0, Capabilities);
 3016         MPT_2_HOST32(sp0, PhysicalInterface);
 3017 }
 3018 
 3019 void
 3020 mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
 3021 {
 3022 
 3023         MPT_2_HOST32(sp1, Configuration);
 3024         MPT_2_HOST32(sp1, OnBusTimerValue);
 3025         MPT_2_HOST16(sp1, IDConfig);
 3026 }
 3027 
 3028 void
 3029 host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
 3030 {
 3031 
 3032         HOST_2_MPT32(sp1, Configuration);
 3033         HOST_2_MPT32(sp1, OnBusTimerValue);
 3034         HOST_2_MPT16(sp1, IDConfig);
 3035 }
 3036 
 3037 void
 3038 mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
 3039 {
 3040         int i;
 3041 
 3042         MPT_2_HOST32(sp2, PortFlags);
 3043         MPT_2_HOST32(sp2, PortSettings);
 3044         for (i = 0; i < sizeof(sp2->DeviceSettings) /
 3045             sizeof(*sp2->DeviceSettings); i++) {
 3046                 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
 3047         }
 3048 }
 3049 
 3050 void
 3051 mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
 3052 {
 3053 
 3054         MPT_2_HOST32(sd0, NegotiatedParameters);
 3055         MPT_2_HOST32(sd0, Information);
 3056 }
 3057 
 3058 void
 3059 mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
 3060 {
 3061 
 3062         MPT_2_HOST32(sd1, RequestedParameters);
 3063         MPT_2_HOST32(sd1, Reserved);
 3064         MPT_2_HOST32(sd1, Configuration);
 3065 }
 3066 
 3067 void
 3068 host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
 3069 {
 3070 
 3071         HOST_2_MPT32(sd1, RequestedParameters);
 3072         HOST_2_MPT32(sd1, Reserved);
 3073         HOST_2_MPT32(sd1, Configuration);
 3074 }
 3075 
 3076 void
 3077 mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
 3078 {
 3079 
 3080         MPT_2_HOST32(fp0, Flags);
 3081         MPT_2_HOST32(fp0, PortIdentifier);
 3082         MPT_2_HOST32(fp0, WWNN.Low);
 3083         MPT_2_HOST32(fp0, WWNN.High);
 3084         MPT_2_HOST32(fp0, WWPN.Low);
 3085         MPT_2_HOST32(fp0, WWPN.High);
 3086         MPT_2_HOST32(fp0, SupportedServiceClass);
 3087         MPT_2_HOST32(fp0, SupportedSpeeds);
 3088         MPT_2_HOST32(fp0, CurrentSpeed);
 3089         MPT_2_HOST32(fp0, MaxFrameSize);
 3090         MPT_2_HOST32(fp0, FabricWWNN.Low);
 3091         MPT_2_HOST32(fp0, FabricWWNN.High);
 3092         MPT_2_HOST32(fp0, FabricWWPN.Low);
 3093         MPT_2_HOST32(fp0, FabricWWPN.High);
 3094         MPT_2_HOST32(fp0, DiscoveredPortsCount);
 3095         MPT_2_HOST32(fp0, MaxInitiators);
 3096 }
 3097 
 3098 void
 3099 mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
 3100 {
 3101 
 3102         MPT_2_HOST32(fp1, Flags);
 3103         MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
 3104         MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
 3105         MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
 3106         MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
 3107 }
 3108 
 3109 void
 3110 host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
 3111 {
 3112 
 3113         HOST_2_MPT32(fp1, Flags);
 3114         HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
 3115         HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
 3116         HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
 3117         HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
 3118 }
 3119 
 3120 void
 3121 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
 3122 {
 3123         int i;
 3124 
 3125         MPT_2_HOST16(volp, VolumeStatus.Reserved);
 3126         MPT_2_HOST16(volp, VolumeSettings.Settings);
 3127         MPT_2_HOST32(volp, MaxLBA);
 3128         MPT_2_HOST32(volp, MaxLBAHigh);
 3129         MPT_2_HOST32(volp, StripeSize);
 3130         MPT_2_HOST32(volp, Reserved2);
 3131         MPT_2_HOST32(volp, Reserved3);
 3132         for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
 3133                 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
 3134         }
 3135 }
 3136 
 3137 void
 3138 mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
 3139 {
 3140 
 3141         MPT_2_HOST32(rpd0, Reserved1);
 3142         MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
 3143         MPT_2_HOST32(rpd0, MaxLBA);
 3144         MPT_2_HOST16(rpd0, ErrorData.Reserved);
 3145         MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
 3146         MPT_2_HOST16(rpd0, ErrorData.SmartCount);
 3147 }
 3148 
 3149 void
 3150 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
 3151 {
 3152 
 3153         MPT_2_HOST16(vi, TotalBlocks.High);
 3154         MPT_2_HOST16(vi, TotalBlocks.Low);
 3155         MPT_2_HOST16(vi, BlocksRemaining.High);
 3156         MPT_2_HOST16(vi, BlocksRemaining.Low);
 3157 }
 3158 #endif
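
/*
 * On little-endian machines these conversions are unnecessary, which is
 * why the whole block above is compiled only when _BYTE_ORDER is
 * _BIG_ENDIAN; mpt.h is assumed to define the little-endian variants
 * away as no-ops.
 */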
